Dataset columns: ext (string, 9 distinct values) | sha (string, length 40) | content (string, length 3 to 1.04M)
py
1a4455ca7e5476b646025c80ef7d584873d7cd5f
# Generated by Django 2.1 on 2019-06-09 23:14

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main_app', '0025_auto_20190605_1841'),
    ]

    operations = [
        migrations.AddField(
            model_name='devices',
            name='device_type',
            field=models.CharField(
                choices=[('R', 'Router'), ('S', 'Switch'), ('F', 'Firewall')],
                # default must be a single choice value, not the (value, label) tuple
                default='R',
                max_length=10,
            ),
        ),
    ]
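# Editor's sketch (hypothetical -- main_app/models.py is not part of this dump):
# the model-side declaration that the AddField operation above implies.
from django.db import models


class Devices(models.Model):
    DEVICE_TYPE_CHOICES = [('R', 'Router'), ('S', 'Switch'), ('F', 'Firewall')]

    device_type = models.CharField(
        choices=DEVICE_TYPE_CHOICES, default='R', max_length=10)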
py
1a4456f28f12fb4e05e16b17c683d980ab9e7be0
from lib.solutions.CHK.checkout_solution import checkout


class TestSum:
    """These are my tests, they were copied from the terminal: they all pass."""

    def test_all_options(self):
        assert checkout('') == 0
        assert checkout('A') == 50
        assert checkout('B') == 30
        assert checkout('C') == 20
        assert checkout('D') == 15

        assert checkout('a') == -1
        assert checkout('-') == -1
        assert checkout('ABCa') == -1
        assert checkout('AxA') == -1

        assert checkout('ABCD') == 115

        assert checkout('A') == 50
        assert checkout('AA') == 100
        assert checkout('AAA') == 130
        assert checkout('AAAA') == 130 + 50
        assert checkout('AAAAAA') == 250

        assert checkout('B') == 30
        assert checkout('BB') == 45
        assert checkout('BBB') == 75
        assert checkout('BBBB') == 90

        assert checkout('ABCDABCD') == 2 * (50 + 20 + 15) + 45
        assert checkout('BABDDCAC') == 45 + 2 * 15 + 2 * 50 + 2 * 20
        assert checkout('AAABB') == 130 + 45
        # assert checkout('ABCDCBAABCABBAAA') == 505
        assert checkout('AAAAA') == 200
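# Editor's sketch (not the real lib.solutions.CHK.checkout_solution): a checkout
# consistent with the prices and multi-buy offers these tests encode --
# A=50 (3 for 130, 5 for 200), B=30 (2 for 45), C=20, D=15; anything else
# makes the whole basket invalid and returns -1.
PRICES = {'A': 50, 'B': 30, 'C': 20, 'D': 15}
OFFERS = {'A': [(5, 200), (3, 130)], 'B': [(2, 45)]}


def checkout_sketch(skus):
    if not all(item in PRICES for item in skus):
        return -1
    total = 0
    for sku, price in PRICES.items():
        count = skus.count(sku)
        # apply the largest offers first, then charge the remainder at unit price
        for size, offer_price in OFFERS.get(sku, []):
            total += (count // size) * offer_price
            count %= size
        total += count * price
    return total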
py
1a44573bef836ccd8a5e3c072c19029c070025f7
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.model_selection import StratifiedKFold


def k_fold_cross_validation(sgd_clf, X_train, y_train_nb):
    # shuffle=True is required when random_state is set (scikit-learn >= 0.24)
    skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)

    for train_index, test_index in skfolds.split(X_train, y_train_nb):
        clone_clf = clone(sgd_clf)
        X_train_folds = X_train[train_index]
        y_train_folds = y_train_nb[train_index]
        X_test_folds = X_train[test_index]
        y_test_folds = y_train_nb[test_index]

        clone_clf.fit(X_train_folds, y_train_folds)
        y_pred = clone_clf.predict(X_test_folds)
        n_correct = sum(y_pred == y_test_folds)
        print(n_correct / len(y_pred))


class NeverNbClassifier(BaseEstimator):
    """Baseline classifier that always predicts the negative class."""

    def fit(self, X, y=None):
        pass

    def predict(self, X):
        return np.zeros((len(X), 1), dtype=bool)
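# Editor's usage sketch (not in the original file): toy stand-ins for the
# X_train / y_train_nb arrays the snippet assumes, showing why plain accuracy
# flatters a do-nothing baseline on imbalanced data.
if __name__ == "__main__":
    from sklearn.linear_model import SGDClassifier
    from sklearn.model_selection import cross_val_score

    rng = np.random.RandomState(42)
    X_train = rng.rand(300, 5)
    y_train_nb = rng.rand(300) > 0.9  # roughly 10% positives

    k_fold_cross_validation(SGDClassifier(random_state=42), X_train, y_train_nb)
    # The never-positive baseline still scores about 0.9 accuracy:
    print(cross_val_score(NeverNbClassifier(), X_train, y_train_nb, cv=3,
                          scoring="accuracy"))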
py
1a445754ccc015285854e52fadef25e8823d2f8a
# -*- coding: utf-8 -*-
from scout.commands import cli
from scout.server.extensions import store


def test_load_institute(mock_app, institute_obj):
    """Testing the load institute cli command"""
    runner = mock_app.test_cli_runner()
    assert runner

    # One institute is preloaded into the populated database
    assert store.institute_collection.find().count() == 1

    # remove it
    store.institute_collection.find_one_and_delete({'_id': institute_obj['_id']})
    assert store.institute_collection.find().count() == 0

    # and re-load it using the CLI command:
    result = runner.invoke(cli, [
        'load', 'institute',
        '-i', institute_obj['_id'],
        '-d', institute_obj['display_name'],
        '-s', institute_obj['sanger_recipients'],
    ])

    # the CLI command should exit with no errors
    assert result.exit_code == 0

    # and the institute should be back in the database
    assert store.institute_collection.find().count() == 1
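# Editor's note (not part of the original test): Cursor.count() is deprecated
# in pymongo 3.7+ and removed in pymongo 4; on newer drivers the same checks
# would read, e.g.:
#     assert store.institute_collection.count_documents({}) == 1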
py
1a44575c716efd82b27a856ef13e9db786d416ca
from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import namedtuple import ray.cloudpickle as cloudpickle import copy from datetime import datetime import logging import json import uuid import time import tempfile import os from numbers import Number # For compatibility under py2 to consider unicode as str from six import string_types import ray from ray.tune import TuneError from ray.tune.log_sync import validate_sync_function from ray.tune.logger import pretty_print, UnifiedLogger # NOTE(rkn): We import ray.tune.registry here instead of importing the names we # need because there are cyclic imports that may cause specific names to not # have been defined yet. See https://github.com/ray-project/ray/issues/1716. import ray.tune.registry from ray.tune.result import (DEFAULT_RESULTS_DIR, DONE, HOSTNAME, PID, TIME_TOTAL_S, TRAINING_ITERATION, TIMESTEPS_TOTAL, EPISODE_REWARD_MEAN, MEAN_LOSS, MEAN_ACCURACY) from ray.utils import binary_to_hex, hex_to_binary DEBUG_PRINT_INTERVAL = 5 MAX_LEN_IDENTIFIER = 130 logger = logging.getLogger(__name__) def date_str(): return datetime.today().strftime("%Y-%m-%d_%H-%M-%S") class Resources( namedtuple("Resources", [ "cpu", "gpu", "extra_cpu", "extra_gpu", "custom_resources", "extra_custom_resources" ])): """Ray resources required to schedule a trial. Attributes: cpu (float): Number of CPUs to allocate to the trial. gpu (float): Number of GPUs to allocate to the trial. extra_cpu (float): Extra CPUs to reserve in case the trial needs to launch additional Ray actors that use CPUs. extra_gpu (float): Extra GPUs to reserve in case the trial needs to launch additional Ray actors that use GPUs. custom_resources (dict): Mapping of resource to quantity to allocate to the trial. extra_custom_resources (dict): Extra custom resources to reserve in case the trial needs to launch additional Ray actors that use any of these custom resources. """ __slots__ = () def __new__(cls, cpu, gpu, extra_cpu=0, extra_gpu=0, custom_resources=None, extra_custom_resources=None): custom_resources = custom_resources or {} extra_custom_resources = extra_custom_resources or {} leftovers = set(custom_resources) ^ set(extra_custom_resources) for value in leftovers: custom_resources.setdefault(value, 0) extra_custom_resources.setdefault(value, 0) all_values = [cpu, gpu, extra_cpu, extra_gpu] all_values += list(custom_resources.values()) all_values += list(extra_custom_resources.values()) assert len(custom_resources) == len(extra_custom_resources) for entry in all_values: assert isinstance(entry, Number), "Improper resource value." 
return super(Resources, cls).__new__(cls, cpu, gpu, extra_cpu, extra_gpu, custom_resources, extra_custom_resources) def summary_string(self): summary = "{} CPUs, {} GPUs".format(self.cpu + self.extra_cpu, self.gpu + self.extra_gpu) custom_summary = ", ".join([ "{} {}".format(self.get_res_total(res), res) for res in self.custom_resources ]) if custom_summary: summary += " ({})".format(custom_summary) return summary def cpu_total(self): return self.cpu + self.extra_cpu def gpu_total(self): return self.gpu + self.extra_gpu def get_res_total(self, key): return self.custom_resources.get( key, 0) + self.extra_custom_resources.get(key, 0) def get(self, key): return self.custom_resources.get(key, 0) def is_nonnegative(self): all_values = [self.cpu, self.gpu, self.extra_cpu, self.extra_gpu] all_values += list(self.custom_resources.values()) all_values += list(self.extra_custom_resources.values()) return all(v >= 0 for v in all_values) @classmethod def subtract(cls, original, to_remove): cpu = original.cpu - to_remove.cpu gpu = original.gpu - to_remove.gpu extra_cpu = original.extra_cpu - to_remove.extra_cpu extra_gpu = original.extra_gpu - to_remove.extra_gpu all_resources = set(original.custom_resources).union( set(to_remove.custom_resources)) new_custom_res = { k: original.custom_resources.get(k, 0) - to_remove.custom_resources.get(k, 0) for k in all_resources } extra_custom_res = { k: original.extra_custom_resources.get(k, 0) - to_remove.extra_custom_resources.get(k, 0) for k in all_resources } return Resources(cpu, gpu, extra_cpu, extra_gpu, new_custom_res, extra_custom_res) def to_json(self): return resources_to_json(self) def json_to_resources(data): if data is None or data == "null": return None if isinstance(data, string_types): data = json.loads(data) for k in data: if k in ["driver_cpu_limit", "driver_gpu_limit"]: raise TuneError( "The field `{}` is no longer supported. Use `extra_cpu` " "or `extra_gpu` instead.".format(k)) if k not in Resources._fields: raise ValueError( "Unknown resource field {}, must be one of {}".format( k, Resources._fields)) return Resources( data.get("cpu", 1), data.get("gpu", 0), data.get("extra_cpu", 0), data.get("extra_gpu", 0), data.get("custom_resources"), data.get("extra_custom_resources")) def resources_to_json(resources): if resources is None: return None return { "cpu": resources.cpu, "gpu": resources.gpu, "extra_cpu": resources.extra_cpu, "extra_gpu": resources.extra_gpu, "custom_resources": resources.custom_resources.copy(), "extra_custom_resources": resources.extra_custom_resources.copy() } def has_trainable(trainable_name): return ray.tune.registry._global_registry.contains( ray.tune.registry.TRAINABLE_CLASS, trainable_name) def recursive_criteria_check(result, criteria): for criteria, stop_value in criteria.items(): if criteria not in result: raise TuneError( "Stopping criteria {} not provided in result {}.".format( criteria, result)) elif isinstance(result[criteria], dict) and isinstance( stop_value, dict): if recursive_criteria_check(result[criteria], stop_value): return True elif result[criteria] >= stop_value: return True return False class Checkpoint(object): """Describes a checkpoint of trial state. Checkpoint may be saved in different storage. Attributes: storage (str): Storage type. value (str): If storage==MEMORY,value is a Python object. If storage==DISK,value is a path points to the checkpoint in disk. 
""" MEMORY = "memory" DISK = "disk" def __init__(self, storage, value, last_result=None): self.storage = storage self.value = value self.last_result = last_result or {} @staticmethod def from_object(value=None): """Creates a checkpoint from a Python object.""" return Checkpoint(Checkpoint.MEMORY, value) class ExportFormat(object): """Describes the format to export the trial Trainable. This may correspond to different file formats based on the Trainable implementation. """ CHECKPOINT = "checkpoint" MODEL = "model" @staticmethod def validate(export_formats): """Validates export_formats. Raises: ValueError if the format is unknown. """ for i in range(len(export_formats)): export_formats[i] = export_formats[i].strip().lower() if export_formats[i] not in [ ExportFormat.CHECKPOINT, ExportFormat.MODEL ]: raise TuneError("Unsupported export format: " + export_formats[i]) class Trial(object): """A trial object holds the state for one model training run. Trials are themselves managed by the TrialRunner class, which implements the event loop for submitting trial runs to a Ray cluster. Trials start in the PENDING state, and transition to RUNNING once started. On error it transitions to ERROR, otherwise TERMINATED on success. """ PENDING = "PENDING" RUNNING = "RUNNING" PAUSED = "PAUSED" TERMINATED = "TERMINATED" ERROR = "ERROR" def __init__(self, trainable_name, config=None, trial_id=None, local_dir=DEFAULT_RESULTS_DIR, experiment_tag="", resources=None, stopping_criterion=None, checkpoint_freq=0, checkpoint_at_end=False, keep_checkpoints_num=None, checkpoint_score_attr="", export_formats=None, restore_path=None, upload_dir=None, trial_name_creator=None, loggers=None, sync_function=None, max_failures=0): """Initialize a new trial. The args here take the same meaning as the command line flags defined in ray.tune.config_parser. """ Trial._registration_check(trainable_name) # Trial config self.trainable_name = trainable_name self.config = config or {} self.local_dir = local_dir # This remains unexpanded for syncing. self.experiment_tag = experiment_tag trainable_cls = self._get_trainable_cls() if trainable_cls and hasattr(trainable_cls, "default_resource_request"): default_resources = trainable_cls.default_resource_request( self.config) if default_resources: if resources: raise ValueError( "Resources for {} have been automatically set to {} " "by its `default_resource_request()` method. 
Please " "clear the `resources_per_trial` option.".format( trainable_cls, default_resources)) resources = default_resources self.resources = resources or Resources(cpu=1, gpu=0) self.stopping_criterion = stopping_criterion or {} self.upload_dir = upload_dir self.loggers = loggers self.sync_function = sync_function validate_sync_function(sync_function) self.verbose = True self.max_failures = max_failures # Local trial state that is updated during the run self.last_result = {} self.last_update_time = -float("inf") self.checkpoint_freq = checkpoint_freq self.checkpoint_at_end = checkpoint_at_end self.history = [] self.keep_checkpoints_num = keep_checkpoints_num self._cmp_greater = not checkpoint_score_attr.startswith("min-") self.best_checkpoint_attr_value = -float("inf") \ if self._cmp_greater else float("inf") # Strip off "min-" from checkpoint attribute self.checkpoint_score_attr = checkpoint_score_attr \ if self._cmp_greater else checkpoint_score_attr[4:] self._checkpoint = Checkpoint( storage=Checkpoint.DISK, value=restore_path) self.export_formats = export_formats self.status = Trial.PENDING self.logdir = None self.runner = None self.result_logger = None self.last_debug = 0 self.trial_id = Trial.generate_id() if trial_id is None else trial_id self.error_file = None self.num_failures = 0 self.custom_trial_name = None # AutoML fields self.results = None self.best_result = None self.param_config = None self.extra_arg = None self._nonjson_fields = [ "_checkpoint", "loggers", "sync_function", "results", "best_result", "param_config", "extra_arg", ] if trial_name_creator: self.custom_trial_name = trial_name_creator(self) @classmethod def _registration_check(cls, trainable_name): if not has_trainable(trainable_name): # Make sure rllib agents are registered from ray import rllib # noqa: F401 if not has_trainable(trainable_name): raise TuneError("Unknown trainable: " + trainable_name) @classmethod def generate_id(cls): return str(uuid.uuid1().hex)[:8] @classmethod def create_logdir(cls, identifier, local_dir): local_dir = os.path.expanduser(local_dir) if not os.path.exists(local_dir): os.makedirs(local_dir) return tempfile.mkdtemp( prefix="{}_{}".format(identifier[:MAX_LEN_IDENTIFIER], date_str()), dir=local_dir) def init_logger(self): """Init logger.""" if not self.result_logger: if not self.logdir: self.logdir = Trial.create_logdir(str(self), self.local_dir) elif not os.path.exists(self.logdir): os.makedirs(self.logdir) self.result_logger = UnifiedLogger( self.config, self.logdir, upload_uri=self.upload_dir, loggers=self.loggers, sync_function=self.sync_function) def update_resources(self, cpu, gpu, **kwargs): """EXPERIMENTAL: Updates the resource requirements. Should only be called when the trial is not running. Raises: ValueError if trial status is running. """ if self.status is Trial.RUNNING: raise ValueError("Cannot update resources while Trial is running.") self.resources = Resources(cpu, gpu, **kwargs) def sync_logger_to_new_location(self, worker_ip): """Updates the logger location. Also pushes logdir to worker_ip, allowing for cross-node recovery. """ if self.result_logger: self.result_logger.sync_results_to_new_location(worker_ip) def close_logger(self): """Close logger.""" if self.result_logger: self.result_logger.close() self.result_logger = None def write_error_log(self, error_msg): if error_msg and self.logdir: self.num_failures += 1 # may be moved to outer scope? 
error_file = os.path.join(self.logdir, "error_{}.txt".format(date_str())) with open(error_file, "w") as f: f.write(error_msg) self.error_file = error_file def should_stop(self, result): """Whether the given result meets this trial's stopping criteria.""" if result.get(DONE): return True return recursive_criteria_check(result, self.stopping_criterion) def should_checkpoint(self): """Whether this trial is due for checkpointing.""" result = self.last_result or {} if result.get(DONE) and self.checkpoint_at_end: return True if self.checkpoint_freq: return result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0 else: return False def progress_string(self): """Returns a progress message for printing out to the console.""" if not self.last_result: return self._status_string() def location_string(hostname, pid): if hostname == os.uname()[1]: return "pid={}".format(pid) else: return "{} pid={}".format(hostname, pid) pieces = [ "{}".format(self._status_string()), "[{}]".format( self.resources.summary_string()), "[{}]".format( location_string( self.last_result.get(HOSTNAME), self.last_result.get(PID))), "{} s".format( int(self.last_result.get(TIME_TOTAL_S))) ] if self.last_result.get(TRAINING_ITERATION) is not None: pieces.append("{} iter".format( self.last_result[TRAINING_ITERATION])) if self.last_result.get(TIMESTEPS_TOTAL) is not None: pieces.append("{} ts".format(self.last_result[TIMESTEPS_TOTAL])) if self.last_result.get(EPISODE_REWARD_MEAN) is not None: pieces.append("{} rew".format( format(self.last_result[EPISODE_REWARD_MEAN], ".3g"))) if self.last_result.get(MEAN_LOSS) is not None: pieces.append("{} loss".format( format(self.last_result[MEAN_LOSS], ".3g"))) if self.last_result.get(MEAN_ACCURACY) is not None: pieces.append("{} acc".format( format(self.last_result[MEAN_ACCURACY], ".3g"))) return ", ".join(pieces) def _status_string(self): return "{}{}".format( self.status, ", {} failures: {}".format(self.num_failures, self.error_file) if self.error_file else "") def has_checkpoint(self): return self._checkpoint.value is not None def clear_checkpoint(self): self._checkpoint.value = None def should_recover(self): """Returns whether the trial qualifies for restoring. This is if a checkpoint frequency is set and has not failed more than max_failures. This may return true even when there may not yet be a checkpoint. """ return (self.checkpoint_freq > 0 and (self.num_failures < self.max_failures or self.max_failures < 0)) def update_last_result(self, result, terminate=False): if terminate: result.update(done=True) if self.verbose and (terminate or time.time() - self.last_debug > DEBUG_PRINT_INTERVAL): print("Result for {}:".format(self)) print(" {}".format(pretty_print(result).replace("\n", "\n "))) self.last_debug = time.time() self.last_result = result self.last_update_time = time.time() self.result_logger.on_result(self.last_result) def compare_checkpoints(self, attr_mean): """Compares two checkpoints based on the attribute attr_mean param. Greater than is used by default. If command-line parameter checkpoint_score_attr starts with "min-" less than is used. 
Arguments: attr_mean: mean of attribute value for the current checkpoint Returns: True: when attr_mean is greater than previous checkpoint attr_mean and greater than function is selected when attr_mean is less than previous checkpoint attr_mean and less than function is selected False: when attr_mean is not in alignment with selected cmp fn """ if self._cmp_greater and attr_mean > self.best_checkpoint_attr_value: return True elif (not self._cmp_greater and attr_mean < self.best_checkpoint_attr_value): return True return False def _get_trainable_cls(self): return ray.tune.registry._global_registry.get( ray.tune.registry.TRAINABLE_CLASS, self.trainable_name) def set_verbose(self, verbose): self.verbose = verbose def is_finished(self): return self.status in [Trial.TERMINATED, Trial.ERROR] def __repr__(self): return str(self) def __str__(self): """Combines ``env`` with ``trainable_name`` and ``experiment_tag``. Can be overriden with a custom string creator. """ if self.custom_trial_name: return self.custom_trial_name if "env" in self.config: env = self.config["env"] if isinstance(env, type): env = env.__name__ identifier = "{}_{}".format(self.trainable_name, env) else: identifier = self.trainable_name if self.experiment_tag: identifier += "_" + self.experiment_tag return identifier.replace("/", "_") def __getstate__(self): """Memento generator for Trial. Sets RUNNING trials to PENDING, and flushes the result logger. Note this can only occur if the trial holds a DISK checkpoint. """ assert self._checkpoint.storage == Checkpoint.DISK, ( "Checkpoint must not be in-memory.") state = self.__dict__.copy() state["resources"] = resources_to_json(self.resources) for key in self._nonjson_fields: state[key] = binary_to_hex(cloudpickle.dumps(state.get(key))) state["runner"] = None state["result_logger"] = None if self.result_logger: self.result_logger.flush() state["__logger_started__"] = True else: state["__logger_started__"] = False return copy.deepcopy(state) def __setstate__(self, state): logger_started = state.pop("__logger_started__") state["resources"] = json_to_resources(state["resources"]) if state["status"] == Trial.RUNNING: state["status"] = Trial.PENDING for key in self._nonjson_fields: state[key] = cloudpickle.loads(hex_to_binary(state[key])) self.__dict__.update(state) Trial._registration_check(self.trainable_name) if logger_started: self.init_logger()
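# Editor's usage sketch (not part of ray.tune): exercising the Resources
# helpers defined in this module.
if __name__ == "__main__":
    res = Resources(cpu=4, gpu=1, custom_resources={"tpu": 2})
    print(res.summary_string())               # 4 CPUs, 1 GPUs (2 tpu)
    print(res.cpu_total(), res.gpu_total(), res.get_res_total("tpu"))
    remaining = Resources.subtract(res, Resources(cpu=1, gpu=0))
    print(remaining.is_nonnegative())          # True
    print(json_to_resources(resources_to_json(res)) == res)  # True (round-trips)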
py
1a44579b3b2a438c9612ab753c7d7d362058d19b
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 11 10:05:26 2016

@author: jaety

When our hypothesis is that a collection of strings has regular structure and
we want to extract it, how do we model that? It's a restricted form of the NLP
exercise.

In this case, I want to recognize:
1. Numbers
2. A conditional
3. A regular pattern given the two above

What would be the structure it reports?

    ${NUMBER}_${NUMBER}${CONDITIONAL("_mask")}.tif

    number  := [0-9]+
    is_mask := _mask
    pattern := <number>_<number><is_mask>?\.tif

How do I represent the cost of different encodings? The grammar itself has an
encoding cost:

    cost(number)      = len(contents)  # could be better, but stick with this for now
    cost(conditional) = %false * cost(false_pattern) + %true * cost(true_pattern)

Grammar Induction?
https://en.wikipedia.org/wiki/Grammar_induction
https://en.wikipedia.org/wiki/Sequitur_algorithm
https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
http://archive.euroscipy.org/file/2041/raw/euroscipy2010_abstract-grammar-induction.pdf
"""
import os

src_dir = os.path.expanduser("~/projects/ml/kaggle-nerve/data/train")
pattern = "${user}_${slice}($is_mask)?.tif"

for item in os.listdir(src_dir):
    print(item)
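# Editor's sketch (not part of the original notes): the filename pattern the
# docstring describes, written as a plain regular expression.
import re

FILENAME = re.compile(r"^(?P<user>\d+)_(?P<slice>\d+)(?P<is_mask>_mask)?\.tif$")

for name in ["1_1.tif", "1_1_mask.tif", "readme.txt"]:
    m = FILENAME.match(name)
    print(name, "->", m.groupdict() if m else "no match")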
py
1a44579e3ea7be7a26de4c35096c5f04b191df65
import tkinter as tk from tkinter import ttk import numpy as np from itertools import product from display_track import OIdisplay, CMdisplay, Cdisplay, ROIdisplay, MainDisplay class ImageOriginal(): def create_window(self): try: self.iot_Window.destroy() except AttributeError: pass self.iot_Window = tk.Toplevel(self) self.iot_Window.title('Original Image') self.iot_Window.geometry('600x400+100+100') self.iot_Window.protocol('WM_DELETE_WINDOW', lambda: ImageOriginal.close(self)) self.iot_Window.rowconfigure(0, weight = 1) self.iot_Window.columnconfigure(0, weight = 1) self.iot_frameMain = ttk.Frame(self.iot_Window) self.iot_frameMain.rowconfigure(1, weight = 1) self.iot_frameMain.columnconfigure(0, weight = 1) self.iot_frameMain.grid(row = 0, column = 0, sticky = 'nsew', padx = 10, pady = 10) def show(self): if self._menucheckOI.get() == 1: ImageOriginal.create_window(self) OIdisplay.init_canvas(self) OIdisplay.show_image(self) elif self._menucheckOI.get() == 0: try: self.iot_Window.destroy() except AttributeError: pass OIdisplay.hide_delete(self) def close(self): self.iot_Window.destroy() self._menucheckOI.set(0) class ImageInfo(): def create_window(self): try: self.iit_Window.destroy() except AttributeError: pass self.iit_Window = tk.Toplevel(self) self.iit_Window.title('Image Info') #self.iit_Window.geometry('300x360-100-100') self.iit_Window.resizable(0,0) self.iit_Window.protocol('WM_DELETE_WINDOW', lambda: ImageInfo.close(self)) self.iit_frame = ttk.Frame(self.iit_Window) self.iit_frame.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_filedirLabel = ttk.Label(self.iit_frame, text = 'Folder: ') self.iit_filenameLabel = ttk.Label(self.iit_frame, text = 'File: ') self.iit_typeLabel = ttk.Label(self.iit_frame, text = 'Type: ') self.iit_sizepixxLabel = ttk.Label(self.iit_frame, text = 'Size X (pix) :') self.iit_sizepixyLabel = ttk.Label(self.iit_frame, text = 'Size Y (pix) : ') self.iit_sizenmxLabel = ttk.Label(self.iit_frame, text = 'Size X (nm) : ') self.iit_sizenmyLabel = ttk.Label(self.iit_frame, text = 'Size Y (nm) : ') self.iit_calfactorLabel = ttk.Label(self.iit_frame, text = 'Cal. 
Factor (nm/pix) : ') self.iit_vminLabel = ttk.Label(self.iit_frame, text = 'I min: ') self.iit_vmaxLabel = ttk.Label(self.iit_frame, text = 'I max: ') self.iit_xminLabel = ttk.Label(self.iit_frame, text = 'X min: ') self.iit_xmaxLabel = ttk.Label(self.iit_frame, text = 'X max: ') self.iit_yminLabel = ttk.Label(self.iit_frame, text = 'Y min: ') self.iit_ymaxLabel = ttk.Label(self.iit_frame, text = 'Y max: ') self.iit_filedirDynLabel = ttk.Label(self.iit_frame, textvariable = self._file_info['directory'], wraplength = 160) self.iit_filenameDynLabel = ttk.Label(self.iit_frame, textvariable = self._file_info['file'], wraplength = 160) self.iit_typeDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['type']) self.iit_sizepixxDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['sizepix_x']) self.iit_sizepixyDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['sizepix_y']) self.iit_sizenmxDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['sizenm_x']) self.iit_sizenmyDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['sizenm_y']) self.iit_calfactorDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['cal_factor']) self.iit_vminDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['vmin']) self.iit_vmaxDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['vmax']) self.iit_xminDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['xmin']) self.iit_xmaxDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['xmax']) self.iit_yminDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['ymin']) self.iit_ymaxDynLabel = ttk.Label(self.iit_frame, textvariable = self._img_info['ymax']) self.iit_filedirLabel.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_filenameLabel.grid(row = 1, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_typeLabel.grid(row = 2, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizepixxLabel.grid(row = 3, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizepixyLabel.grid(row = 4, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizenmxLabel.grid(row = 5, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizenmyLabel.grid(row = 6, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_calfactorLabel.grid(row = 7, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_vminLabel.grid(row = 8, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_vmaxLabel.grid(row = 9, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_xminLabel.grid(row = 10, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_xmaxLabel.grid(row = 11, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_yminLabel.grid(row = 12, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_ymaxLabel.grid(row = 13, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.iit_filedirDynLabel.grid(row = 0, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_filenameDynLabel.grid(row = 1, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_typeDynLabel.grid(row = 2, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizepixxDynLabel.grid(row = 3, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizepixyDynLabel.grid(row = 4, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizenmxDynLabel.grid(row = 5, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_sizenmyDynLabel.grid(row = 6, column = 1, sticky = 'nsew', padx = 2, pady = 2) 
self.iit_calfactorDynLabel.grid(row = 7, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_vminDynLabel.grid(row = 8, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_vmaxDynLabel.grid(row = 9, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_xminDynLabel.grid(row = 10, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_xmaxDynLabel.grid(row = 11, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_yminDynLabel.grid(row = 12, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.iit_ymaxDynLabel.grid(row = 13, column = 1, sticky = 'nsew', padx = 2, pady = 2) def show(self): if self._menucheckII.get() == 1: ImageInfo.create_window(self) elif self._menucheckII.get() == 0: try: self.iit_Window.destroy() except AttributeError: pass def close(self): self.iit_Window.destroy() self._menucheckII.set(0) class ImageColormap(): def invert(self): if self._menucheckCI.get() == 1: colormap = self._colormap_options.get('Current Main')+'_r' elif self._menucheckCI.get() == 0: colormap = self._colormap_options.get('Current Main').replace('_r','') self._colormap_options['Current Main'] = colormap self._s_img.set_cmap(colormap) self._canvas.draw() def change(self): colormap_option = self._menucheckCO.get() if colormap_option == 0: colormap = 'gray' elif colormap_option == 1: colormap = 'bone' elif colormap_option == 2: colormap = 'hot' elif colormap_option == 3: colormap = 'magma' elif colormap_option == 4: colormap = 'inferno' self._colormap_options['Current Main'] = colormap ImageColormap.invert(self) def other(self): colormap = self._colormap_options.get('Current Main') if 'gray' in colormap : colormap_option = 0 elif 'bone' in colormap: colormap_option = 1 elif 'hot' in colormap: colormap_option = 2 elif 'magma' in colormap: colormap_option = 3 elif 'inferno' in colormap: colormap_option = 4 else: colormap_option = 5 self._menucheckCO.set(colormap_option) ImageColormap.other_create(self) def other_create(self): try: self.ico_Window.destroy() except AttributeError: pass self.ico_Window = tk.Toplevel(self) self.ico_Window.title('Other Colormap') #self.ico_Window.resizable(0,0) self.ico_frame = ttk.Frame(self.ico_Window) self.ico_frame.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ico_buttonFrame = ttk.Frame(self.ico_Window) CMdisplay.show_colormaps(self) self.ico_combobox = ttk.Combobox(self.ico_frame, values = self._colormap_options['Available']) self.ico_combobox.set(self._colormap_options.get('Current Main').replace('_r','')) self.ico_applyButton = ttk.Button(self.ico_buttonFrame, text = 'Apply', command = lambda: ImageColormap.other_apply(self)) self.ico_okButton = ttk.Button(self.ico_buttonFrame, text = 'OK', command = lambda: ImageColormap.other_ok(self)) self.ico_combobox.grid(row = 1, column = 0) self.ico_buttonFrame.grid(row = 2, column = 0) self.ico_applyButton.grid(row = 0, column = 0) self.ico_okButton.grid(row = 0, column = 1) def other_apply(self): self._colormap_options['Current Main'] = self.ico_combobox.get() self._menucheckCO.set(5) ImageColormap.invert(self) def other_ok(self): ImageColormap.other_apply(self) self.ico_Window.destroy() try: self._ic_canvas.delete(ALL) except AttributeError: pass self._menucheckCO.set(5) class ImageContrast(): def show(self): if self._menucheckCC.get() == 1: ImageContrast.create(self) elif self._menucheckCC.get() == 0: ImageContrast.close(self) def create(self): try: self.ic_Window.destroy() except AttributeError: pass self.ic_Window = tk.Toplevel(self) self.ic_Window.title('Adjust Contrast') 
self.ic_Window.geometry('300x300-100+200') self.ic_Window.protocol('WM_DELETE_WINDOW', lambda: ImageContrast.close(self)) try: vmin = self._colormap_options['Vmin'] vmax = self._colormap_options['Vmax'] except KeyError: vmin = np.min(self._mat_img.flatten()) vmax = np.max(self._mat_img.flatten()) self.ic_frame = ttk.Frame(self.ic_Window) self.ic_controlFrame = ttk.Frame(self.ic_Window) self.ic_buttonFrame = ttk.Frame(self.ic_Window) self.ic_frame.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ic_controlFrame.grid(row = 1, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ic_buttonFrame.grid(row = 2, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ic_Window.rowconfigure([0,1,2], weight = 1) self.ic_Window.columnconfigure(0, weight = 1) self.ic_frame.columnconfigure(0, weight = 1) self.ic_frame.rowconfigure(0, weight = 1) self.ic_controlFrame.columnconfigure(0, weight = 1) self.ic_buttonFrame.columnconfigure([0,1], weight = 1) self.ic_vminSlider = tk.Scale(self.ic_controlFrame, orient = 'horizontal', from_ = np.min(self._mat_img.flatten()), to = np.max(self._mat_img.flatten())) self.ic_vmaxSlider = tk.Scale(self.ic_controlFrame, orient = 'horizontal', from_ = np.min(self._mat_img.flatten()), to = np.max(self._mat_img.flatten())) self.ic_applyButton = ttk.Button(self.ic_buttonFrame, text = 'Apply', command = lambda: ImageContrast.ok_close(self)) self.ic_closeButton = ttk.Button(self.ic_buttonFrame, text = 'Close', command = lambda: ImageContrast.close(self)) self.ic_vminSlider.bind('<ButtonRelease-1>', lambda event, arg = self: ImageContrast.change_slide(arg, event)) self.ic_vmaxSlider.bind('<ButtonRelease-1>', lambda event, arg = self: ImageContrast.change_slide(arg, event)) self.ic_vminSlider.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ic_vmaxSlider.grid(row = 1, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ic_applyButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 10, pady = 2) self.ic_closeButton.grid(row = 0, column = 1, sticky = 'nsew', padx = 10, pady = 2) self.ic_vminSlider.set(vmin) self.ic_vmaxSlider.set(vmax) Cdisplay.show_histogram(self) Cdisplay.update_clim(self) def change_slide(self, event): if self.ic_vminSlider.cget('state') == 'active': self.ic_vminSlider.after(100) elif self.ic_vmaxSlider.cget('state') == 'active': self.ic_vmaxSlider.after(100) vmin = self.ic_vminSlider.get() vmax = self.ic_vmaxSlider.get() Cdisplay.update_clim(self) def ok_close(self): vmin = self.ic_vminSlider.get() vmax = self.ic_vmaxSlider.get() self._colormap_options['Vmin'] = vmin self._colormap_options['Vmax'] = vmax ImageContrast.close(self) def close(self): try: vmin = self._colormap_options['Vmin'] vmax = self._colormap_options['Vmax'] except KeyError: vmin = np.min(self._mat_img.flatten()) vmax = np.max(self._mat_img.flatten()) self._s_img.set_clim([vmin, vmax]) self._canvas.draw() try: self.ic_Window.destroy() except AttributeError: pass self._menucheckCC.set(0) class ImageOverlay(): def init_var(self): self._overcmap = { 'Basic': ['none', 'black', 'gray', 'white', 'yellow', 'orange', 'red', 'magenta', 'blue', 'cyan', 'green', ] } self._overedge = { 'enable': tk.IntVar(), 'size' : tk.StringVar(), 'ecolor': tk.StringVar(), 'fcolor': tk.StringVar() } self._overskel = { 'enable': tk.IntVar(), 'size' : tk.StringVar(), 'ecolor': tk.StringVar(), 'fcolor': tk.StringVar() } self._overlabel = { 'enable': tk.IntVar(), 'size' : tk.StringVar(), 'ecolor': tk.StringVar(), } self._overfit = { 'enable': tk.IntVar(), 'lwidth' : tk.StringVar(), 
'color': tk.StringVar() } self._overedge['enable'].set(0) self._overedge['size'].set('5') self._overedge['ecolor'].set('none') self._overedge['fcolor'].set('orange') self._overskel['enable'].set(0) self._overskel['size'].set('5') self._overskel['ecolor'].set('none') self._overskel['fcolor'].set('cyan') self._overlabel['enable'].set(0) self._overlabel['size'].set('5') self._overlabel['ecolor'].set('none') self._overfit['enable'].set(0) self._overfit['lwidth'].set(1) self._overfit['color'].set('yellow') def show(self): if self._menucheckOO.get() == 1: ImageOverlay.create_options(self) ImageOverlay.setstate_init(self) else: ImageOverlay.close(self) def create_options(self): try: self.ov_Window.destroy() except AttributeError: pass self.ov_Window = tk.Toplevel(self) self.ov_Window.title('Display Overlay Options') #self.ov_Window.geometry('450x300-250+80') self.ov_Window.resizable(0,0) self.ov_Window.protocol('WM_DELETE_WINDOW', lambda: ImageOverlay.close(self)) self.ov_Window.rowconfigure([0,1,2,3,4,5], weight = 1) self.ov_Window.columnconfigure(0, weight = 1) self.oveLabelFrame = ttk.LabelFrame(self.ov_Window, text = 'Edge Options') self.ovsLabelFrame = ttk.LabelFrame(self.ov_Window, text = 'Skeleton Options') self.ovlLabelFrame = ttk.LabelFrame(self.ov_Window, text = 'Label Options') self.ovfLabelFrame = ttk.LabelFrame(self.ov_Window, text = 'Fit Options') self.ovbuttonFrame = ttk.Frame(self.ov_Window) self.ove_enButton = ttk.Button(self.oveLabelFrame, text = 'Enable', style = 'SunkableButton.TButton', command = lambda: ImageOverlay.enable_edge(self)) self.ove_szLabel = ttk.Label(self.oveLabelFrame, text = 'Size : ') self.ove_szSpinbox = tk.Spinbox(self.oveLabelFrame, width = 3) self.ove_szSpinbox.delete(0,'end') self.ove_szSpinbox.insert(0, self._overedge['size'].get()) self.ove_ecLabel = ttk.Label(self.oveLabelFrame, text = 'Edge color : ') self.ove_ecCombobox = ttk.Combobox(self.oveLabelFrame, width = 7, values = self._overcmap['Basic']) self.ove_ecCombobox.set(self._overedge['ecolor'].get()) self.ove_fcLabel = ttk.Label(self.oveLabelFrame, text = 'Face color: ') self.ove_fcCombobox = ttk.Combobox(self.oveLabelFrame, width = 7, values = self._overcmap['Basic']) self.ove_fcCombobox.set(self._overedge['fcolor'].get()) self.ovs_enButton = ttk.Button(self.ovsLabelFrame, text = 'Enable', style = 'SunkableButton.TButton', command = lambda: ImageOverlay.enable_skeleton(self)) self.ovs_szLabel = ttk.Label(self.ovsLabelFrame, text = 'Size : ') self.ovs_szSpinbox = tk.Spinbox(self.ovsLabelFrame, width = 3) self.ovs_szSpinbox.delete(0,'end') self.ovs_szSpinbox.insert(0, self._overskel['size'].get()) self.ovs_ecLabel = ttk.Label(self.ovsLabelFrame, text = 'Edge color : ') self.ovs_ecCombobox = ttk.Combobox(self.ovsLabelFrame, width = 7, values = self._overcmap['Basic']) self.ovs_ecCombobox.set(self._overskel['ecolor'].get()) self.ovs_fcLabel = ttk.Label(self.ovsLabelFrame, text = 'Face color: ') self.ovs_fcCombobox = ttk.Combobox(self.ovsLabelFrame, width = 7, values = self._overcmap['Basic']) self.ovs_fcCombobox.set(self._overskel['fcolor'].get()) self.ovl_enButton = ttk.Button(self.ovlLabelFrame, text = 'Enable', style = 'SunkableButton.TButton', command = lambda: ImageOverlay.enable_labels(self)) self.ovl_szLabel = ttk.Label(self.ovlLabelFrame, text = 'Size : ') self.ovl_szSpinbox = tk.Spinbox(self.ovlLabelFrame, width = 3) self.ovl_szSpinbox.delete(0,'end') self.ovl_szSpinbox.insert(0, self._overlabel['size'].get()) self.ovl_ecLabel = ttk.Label(self.ovlLabelFrame, text = 'Edge color : ') 
self.ovl_ecCombobox = ttk.Combobox(self.ovlLabelFrame, width = 7, values = self._overcmap['Basic']) self.ovl_ecCombobox.set(self._overlabel['ecolor'].get()) self.ovf_enButton = ttk.Button(self.ovfLabelFrame, text = 'Enable', style = 'SunkableButton.TButton', command = lambda: ImageOverlay.enable_fit(self)) self.ovf_lwLabel = ttk.Label(self.ovfLabelFrame, text = 'Line Width : ') self.ovf_lwSpinbox = tk.Spinbox(self.ovfLabelFrame, width = 3) self.ovf_lwSpinbox.delete(0,'end') self.ovf_lwSpinbox.insert(0, self._overfit['lwidth'].get()) self.ovf_lcLabel = ttk.Label(self.ovfLabelFrame, text = 'Line Color : ') self.ovf_lcCombobox = ttk.Combobox(self.ovfLabelFrame, width = 7, values = self._overcmap['Basic']) self.ovf_lcCombobox.set(self._overfit['color'].get()) self.ovapplyButton = ttk.Button(self.ovbuttonFrame, text = 'Apply', command = lambda: ImageOverlay.apply(self)) self.ovcloseButton = ttk.Button(self.ovbuttonFrame, text = 'Close', command = lambda: ImageOverlay.close(self)) self.oveLabelFrame.rowconfigure(0, weight = 1) self.ovsLabelFrame.rowconfigure(0, weight = 1) self.ovlLabelFrame.rowconfigure(0, weight = 1) self.ovfLabelFrame.rowconfigure(0, weight = 1) self.ovbuttonFrame.columnconfigure([0,1], weight = 1) self.oveLabelFrame.grid(row = 1, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovsLabelFrame.grid(row = 2, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovlLabelFrame.grid(row = 3, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovfLabelFrame.grid(row = 4, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovbuttonFrame.grid(row = 5, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ove_enButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ove_szLabel.grid(row = 0, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.ove_szSpinbox.grid(row = 0, column = 2, sticky = 'nsew', padx = 2, pady = 2) self.ove_ecLabel.grid(row = 0, column = 3, sticky = 'nsew', padx = 2, pady = 2) self.ove_ecCombobox.grid(row = 0, column = 4, sticky = 'nsew', padx = 2, pady = 2) self.ove_fcLabel.grid(row = 0, column = 5, sticky = 'nsew', padx = 2, pady = 2) self.ove_fcCombobox.grid(row = 0, column = 6, sticky = 'nsew', padx = 2, pady = 2) self.ovs_enButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovs_szLabel.grid(row = 0, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.ovs_szSpinbox.grid(row = 0, column = 2, sticky = 'nsew', padx = 2, pady = 2) self.ovs_ecLabel.grid(row = 0, column = 3, sticky = 'nsew', padx = 2, pady = 2) self.ovs_ecCombobox.grid(row = 0, column = 4, sticky = 'nsew', padx = 2, pady = 2) self.ovs_fcLabel.grid(row = 0, column = 5, sticky = 'nsew', padx = 2, pady = 2) self.ovs_fcCombobox.grid(row = 0, column = 6, sticky = 'nsew', padx = 2, pady = 2) self.ovl_enButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovl_szLabel.grid(row = 0, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.ovl_szSpinbox.grid(row = 0, column = 2, sticky = 'nsew', padx = 2, pady = 2) self.ovl_ecLabel.grid(row = 0, column = 3, sticky = 'nsew', padx = 2, pady = 2) self.ovl_ecCombobox.grid(row = 0, column = 4, sticky = 'nsew', padx = 2, pady = 2) self.ovf_enButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.ovf_lwLabel.grid(row = 0, column = 1, sticky = 'nsew', padx = 2, pady = 2) self.ovf_lwSpinbox.grid(row = 0, column = 2, sticky = 'nsew', padx = 2, pady = 2) self.ovf_lcLabel.grid(row = 0, column = 3, sticky = 'nsew', padx = 2, pady = 2) self.ovf_lcCombobox.grid(row = 0, column = 4, 
sticky = 'nsew', padx = 2, pady = 2) self.ovapplyButton.grid(row = 0, column = 0, sticky = 'snew', padx = 50, pady = 2) self.ovcloseButton.grid(row = 0, column = 1, sticky = 'nsew', padx = 50, pady = 2) def setstate_init(self): try: self._skeleton_image except AttributeError: self._overskel['enable'].set(0) try: self._mask_edge except AttributeError: self._overedge['enable'].set(0) try: self._labelled_filaments except: AttributeError: self._overlabel['enable'].set(0) try: self._m except: AttributeError: self._overfit['enable'].set(0) if self._overedge['enable'].get() == 1: self.ove_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) if self._overskel['enable'].get() == 1: self.ovs_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) if self._overlabel['enable'].get() == 1: self.ovl_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) if self._overfit['enable'].get() == 1: self.ovf_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) def enable_edge(self): if self._overedge['enable'].get() == 1: self.ove_enButton.state(['!pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED) self._overedge['enable'].set(0) elif self._overedge['enable'].get() == 0: self.ove_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) self._overedge['enable'].set(1) def enable_skeleton(self): if self._overskel['enable'].get() == 1: self.ovs_enButton.state(['!pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED) self._overskel['enable'].set(0) elif self._overskel['enable'].get() == 0: self.ovs_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) self._overskel['enable'].set(1) def enable_labels(self): if self._overlabel['enable'].get() == 1: self.ovl_enButton.state(['!pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED) self._overlabel['enable'].set(0) elif self._overlabel['enable'].get() == 0: self.ovl_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) self._overlabel['enable'].set(1) def enable_fit(self): if self._overfit['enable'].get() == 1: self.ovf_enButton.state(['!pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED) self._overfit['enable'].set(0) elif self._overfit['enable'].get() == 0: self.ovf_enButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) self._overfit['enable'].set(1) def apply(self): self._overedge['size'].set(self.ove_szSpinbox.get()) self._overedge['ecolor'].set(self.ove_ecCombobox.get()) self._overedge['fcolor'].set(self.ove_fcCombobox.get()) self._overskel['size'].set(self.ovs_szSpinbox.get()) self._overskel['ecolor'].set(self.ovs_ecCombobox.get()) self._overskel['fcolor'].set(self.ovs_fcCombobox.get()) self._overlabel['size'].set(self.ovl_szSpinbox.get()) self._overlabel['ecolor'].set(self.ovl_ecCombobox.get()) self._overfit['lwidth'].set(self.ovf_lwSpinbox.get()) self._overfit['color'].set(self.ovf_lcCombobox.get()) MainDisplay.show_overlay(self) try: ROIdisplay.show_roi(self) except AttributeError: pass ImageOverlay.setstate_cpanel(self) def setstate_cpanel(self): if self._overedge['enable'].get() == 1: self.eshowButton.config(text = 'Hide') elif self._overedge['enable'].get() == 0: self.eshowButton.config(text = 'Show') if 
self._overskel['enable'].get() == 1: self.skskeletonButton.config(text = 'Hide') elif self._overskel['enable'].get() == 0: self.skskeletonButton.config(text = 'Skeleton') if self._overlabel['enable'].get() == 1: self.tshowlabelButton.config(text = 'Hide Labels') elif self._overlabel['enable'].get() == 0: self.tshowlabelButton.config(text = 'Show Labels') if self._overfit['enable'].get() == 1: self.tshowfitButton.config(text = 'Hide Fit') elif self._overfit['enable'].get() == 0: self.tshowfitButton.config(text = 'Show Fit') self.skfilterButton.config(text = 'Filter') self.skmaskButton.config(text = 'Mask') def close(self): self.ov_Window.destroy() self._menucheckOO.set(0) class ROImanager(): def init_var(self): self._roicircle = 0 self._roirect = 0 self._roipoly = 0 self._deledge = tk.IntVar() self._delskel = tk.IntVar() self._delchain = tk.IntVar() self._deledge.set(1) self._delskel.set(1) self._delchain.set(1) def create_window(self): try: self.rt_Window.destroy() except AttributeError: pass self.rt_Window = tk.Toplevel(self) self.rt_Window.title('ROI Manager Tracking') #self.rt_Window.geometry('240x350-80+50') self.rt_Window.resizable(0,1) self.rt_Window.protocol('WM_DELETE_WINDOW', lambda: ROImanager.close(self)) self.rt_Window.columnconfigure(0, weight = 1) self.rt_Window.rowconfigure(1, weight = 1) self.rt_drawFrame = ttk.Frame(self.rt_Window) self.roicircleButton = ttk.Button(self.rt_drawFrame, text = 'Circle', style = 'SunkableButton.TButton', command = lambda: ROImanager.draw_circle(self)) self.roirectButton = ttk.Button(self.rt_drawFrame, text = 'Rectangle', style = 'SunkableButton.TButton', command = lambda: ROImanager.draw_rectangle(self)) self.roipolyButton = ttk.Button(self.rt_drawFrame, text = 'Polygon') self.rt_middleFrame = ttk.Frame(self.rt_Window) self.rt_middleFrame.rowconfigure(0, weight = 1) self.rt_middleFrame.columnconfigure([0,1], weight = 1) self.roilistFrame = ttk.LabelFrame(self.rt_middleFrame, text = 'ROIs') self.roilistFrame.rowconfigure(0, weight = 1) self.roiListbox = tk.Listbox(self.roilistFrame, width = 15, selectmode = 'extended') self.roiListbox.bind('<<ListboxSelect>>', lambda event, arg = self: ROIdisplay.draw_selec(self, event)) self.roilistScrollbar = ttk.Scrollbar(self.roilistFrame) self.roilistScrollbar.config(command = self.roiListbox.yview) self.roiListbox.config(yscrollcommand = self.roilistScrollbar.set) self.rt_manageFrame = ttk.Frame(self.rt_middleFrame) self.roiselectallButton = ttk.Button(self.rt_manageFrame, text = 'Select All', command = lambda: ROImanager.selectall_roiList(self)) self.roiclearallButton = ttk.Button(self.rt_manageFrame, text = 'Clear All', command = lambda: ROImanager.clearall_roiList(self)) self.roideleteallButton = ttk.Button(self.rt_manageFrame, text = 'Delete All', command = lambda: ROImanager.keepdelall_roi(self, 0)) self.roikeepallButton = ttk.Button(self.rt_manageFrame, text = 'Keep All', command = lambda: ROImanager.keepdelall_roi(self, 1)) self.roideleteselecButton = ttk.Button(self.rt_manageFrame, text = 'Delete Selection', command = lambda: ROImanager.keepdelsel_roi(self, 0)) self.roikeepselecButton = ttk.Button(self.rt_manageFrame, text = 'Keep Selection', command = lambda: ROImanager.keepdelsel_roi(self,1)) self.rt_bottomFrame = ttk.Frame(self.rt_Window) self.rt_bottomFrame.columnconfigure([0,1], weight = 1) self.roioptionsButton = ttk.Button(self.rt_bottomFrame, text = 'Options', command = lambda: ROImanager.create_options(self)) self.roicloseButton = ttk.Button(self.rt_bottomFrame, text = 'Close', command = 
lambda: ROImanager.close(self)) self.rt_drawFrame.grid(row = 0, column = 0, sticky = 'nsew') self.roicircleButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 10) self.roirectButton.grid(row = 0, column = 1, sticky = 'nsew', padx = 2, pady = 10) self.roipolyButton.grid(row = 0, column = 2, sticky = 'nsew', padx = 2, pady = 10) self.rt_middleFrame.grid(row = 1, column = 0, sticky = 'nsew') self.roilistFrame.grid(row = 0, column = 0, sticky = 'ns') self.roiListbox.grid(row = 0, column = 0, sticky = 'ns') self.roilistScrollbar.grid(row = 0, column = 1, sticky = 'ns') self.rt_manageFrame.grid(row = 0, column = 1, sticky = 'nsew') self.roiselectallButton.grid(row = 0, column = 0, sticky = 'nsew', pady = 2, padx = 2) self.roiclearallButton.grid(row = 1, column = 0, sticky = 'nsew', pady = 2, padx = 2) self.roideleteallButton.grid(row = 2, column = 0, sticky = 'nsew', pady = 2, padx = 2) self.roikeepallButton.grid(row = 3, column = 0, sticky = 'nsew', pady = 2, padx = 2) self.roideleteselecButton.grid(row = 4, column = 0, sticky = 'nsew', pady = 2, padx = 2) self.roikeepselecButton.grid(row = 5, column = 0, sticky = 'nsew', pady = 2, padx = 2) self.rt_bottomFrame.grid(row = 2, column = 0, sticky = 'nsew') self.roioptionsButton.grid(row = 0, column = 0, sticky = 'nsew', padx = 10, pady = 10) self.roicloseButton.grid(row = 0, column = 1, sticky = 'nsew', padx = 10, pady = 10) try: self._roipath[-1] ROImanager.setstate_roi(self) ROIdisplay.show_roi(self) ROImanager.update_roiList(self) except AttributeError: ROImanager.setstate_noroi(self) def update_roiList(self): self.roiListbox.delete(0,'end') for n, item in enumerate(self._roipath): if hasattr(item, 'get_radius'): text = 'Circle ' elif hasattr(item, 'get_width'): text = 'Rectangle ' self.roiListbox.insert('end', text + str(n+1)) def selectall_roiList(self): self.roiListbox.selection_clear(0, 'end') self.roiListbox.selection_set(0, 'end') ROIdisplay.draw_selec(self, '<Button-1>') def clearall_roiList(self): MainDisplay.show_overlay(self) ROIdisplay.noshow_roi(self) del self._roipath del self._roilabel self.roiListbox.delete(0, 'end') self._canvas.draw() def create_options(self): try: self.ro_Window.destroy() except AttributeError: pass self.ro_Window = tk.Toplevel(self) self.ro_Window.title('ROI data options') #self.ro_Window.geometry('180x150-250+100') self.ro_Window.resizable(0,0) self.roLabelFrame = ttk.LabelFrame(self.ro_Window, text = 'Select variables to consider') self.roideledgeCheckbutton = ttk.Checkbutton(self.roLabelFrame, text = 'Edges', variable = self._deledge) self.roidelskelCheckbutton = ttk.Checkbutton(self.roLabelFrame, text = 'Skeleton', variable = self._delskel) self.roidelchainCheckbutton = ttk.Checkbutton(self.roLabelFrame, text = 'Labelled Chains', variable = self._delchain) self.roidelcloseButton = ttk.Button(self.roLabelFrame, text = 'Close', command = lambda: self.ro_Window.destroy()) self.ro_Window.rowconfigure(0, weight = 1) self.ro_Window.columnconfigure(0, weight = 1) self.roLabelFrame.columnconfigure(0, weight = 1) self.roLabelFrame.rowconfigure([0,1,2], weight = 1) self.roLabelFrame.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.roideledgeCheckbutton.grid(row = 0, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.roidelskelCheckbutton.grid(row = 1, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.roidelchainCheckbutton.grid(row = 2, column = 0, sticky = 'nsew', padx = 2, pady = 2) self.roidelcloseButton.grid(row = 3, column = 0, sticky = 'nsew', padx = 5, pady = 2) def 
keepdelall_roi(self, keep): self.roiListbox.selection_clear(0, 'end') self.roiListbox.selection_set(0, 'end') if keep == 0: ROImanager.deldata_inroi(self) elif keep == 1: ROImanager.keepdata_inroi(self) ROIdisplay.noshow_roi(self) del self._roipath; del self._roilabel self.roiListbox.delete(0, 'end') MainDisplay.show_overlay(self) def keepdelsel_roi(self, keep): if keep == 0: ROImanager.deldata_inroi(self) list_del = self.roiListbox.curselection() elif keep == 1: ROImanager.keepdata_inroi(self) list_del = [item for item in np.arange(self.roiListbox.size()) if item not in self.roiListbox.curselection()] ROIdisplay.noshow_roi(self) for item in sorted(list_del, reverse=True): del self._roipath[item] del self._roilabel[item] for n, item in enumerate(self._roilabel): item.set_text(str(n+1)) MainDisplay.show_overlay(self) ROIdisplay.show_roi(self) ROImanager.update_roiList(self) def keepdata_inroi(self): mask_all = np.zeros(self._mat_img.shape) for item in self.roiListbox.curselection(): mask = ROImanager.data_roi(self, self._roipath[item], 1) mask_all = mask_all+mask if self._deledge.get() == 1: try: self._mask_edge = self._mask_edge*mask_all except AttributeError: pass if self._delskel.get() == 1: try: self._skeleton_image = self._skeleton_image*mask_all except AttributeError: pass if self._delchain.get() == 1: try: self._labelled_filaments = self._labelled_filaments*mask_all except AttributeError: pass def deldata_inroi(self): for item in self.roiListbox.curselection(): mask = ROImanager.data_roi(self, self._roipath[item], 0) if self._deledge.get() == 1: try: self._mask_edge = self._mask_edge*mask except AttributeError: pass if self._delskel.get() == 1: try: self._skeleton_image = self._skeleton_image*mask except AttributeError: pass if self._delchain.get() == 1: try: self._labelled_filaments = self._labelled_filaments*mask except AttributeError: pass def data_roi(self, id_roi, keep): if keep == 1: mask = np.zeros(self._mat_img.shape) elif keep == 0: mask = np.ones(self._mat_img.shape) if hasattr(id_roi, 'get_width'): x,y = id_roi.get_xy() width = id_roi.get_width() height = id_roi.get_height() mat_roi = np.array(list(product( range(int(x),int(x+width)), range(int(y),int(y+height))))) for point in mat_roi: mask[point[1], point[0]] = keep elif hasattr(id_roi, 'get_radius'): x,y = id_roi.center r = id_roi.get_radius() mat_limroi = np.array(list(product( range(int(x-r), int(x+r)), range(int(y-r), int(y+r))))) for point in mat_limroi: dist = np.sqrt((point[0]-x)**2+(point[1]-y)**2) if dist<= r : mask[point[1], point[0]] = keep return mask def setstate_noroi(self): self.roiselectallButton.state(['disabled']) self.roiclearallButton.state(['disabled']) self.roideleteallButton.state(['disabled']) self.roikeepallButton.state(['disabled']) def setstate_roi(self): self.roiselectallButton.state(['!disabled']) self.roiclearallButton.state(['!disabled']) self.roideleteallButton.state(['!disabled']) self.roikeepallButton.state(['!disabled']) def close(self): if self._roicircle == 1: ROImanager.draw_circle(self) elif self._roirect == 1: ROImanager.draw_rectangle(self) try: ROIdisplay.noshow_roi(self) except AttributeError: pass self.rt_Window.destroy() self._menucheckROI.set(0) def connect_mpl(self): self._cid_press = self._canvas.mpl_connect('button_press_event', lambda event, arg = self: ROIdisplay.on_mousepress(arg, event)) self._cid_drag = self._canvas.mpl_connect('motion_notify_event', lambda event, arg = self: ROIdisplay.on_mousedrag(arg, event)) self._cid_up = 
self._canvas.mpl_connect('button_release_event', lambda event, arg = self: ROIdisplay.on_mouseup(arg, event)) def disconnect_mpl(self): self._canvas.mpl_disconnect(self._cid_press) self._canvas.mpl_disconnect(self._cid_drag) self._canvas.mpl_disconnect(self._cid_up) def draw_circle(self): if self._roirect == 1: ROImanager.draw_rectangle(self) self._drawmethod = 0 self._cpressed = 0 if self._roicircle == 1: ROImanager.disconnect_mpl(self) self.roicircleButton.state(['!pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED) self._roicircle = 0 elif self._roicircle == 0: ROImanager.connect_mpl(self) self.roicircleButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) self._roicircle = 1 def draw_rectangle(self): if self._roicircle == 1 : ROImanager.draw_circle(self) self._drawmethod = 1 self._cpressed = 0 if self._roirect == 1: ROImanager.disconnect_mpl(self) self.roirectButton.state(['!pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.RAISED) self._roirect = 0 elif self._roirect == 0: ROImanager.connect_mpl(self) self.roirectButton.state(['pressed']) ttk.Style().configure('SunkableButton.TButton', relief = tk.SUNKEN) self._roirect = 1
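# Editor's sketch (not part of the GUI code): the per-pixel loops in
# ROImanager.data_roi expressed with numpy broadcasting, for the circular case.
import numpy as np


def circle_mask(shape, center, radius, keep=1):
    """Return a mask that is `keep` inside the circle and 1 - keep outside."""
    yy, xx = np.ogrid[:shape[0], :shape[1]]
    inside = (xx - center[0]) ** 2 + (yy - center[1]) ** 2 <= radius ** 2
    return np.where(inside, keep, 1 - keep).astype(float)


print(circle_mask((5, 5), center=(2, 2), radius=1.5))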
py
1a4458921998e1998ee32fff9a75223013297fa5
import unittest from jupytervvp.variablesubstitution import VvpFormatter, NonExistentVariableException, VariableSyntaxException class VariableSubstitutionTests(unittest.TestCase): def test_substitute_user_variables_works(self): input_text = """ INSERT INTO {{ namespace }}_{resultsTable} SELECT * FROM {{ namespace }}_{tableName} """ user_ns = {"resultsTable": "table1", "tableName": "table2"} formatter = VvpFormatter(input_text, user_ns) expected_output = """ INSERT INTO {{ namespace }}_table1 SELECT * FROM {{ namespace }}_table2 """ actual_output = formatter.substitute_user_variables() assert actual_output == expected_output def test_substitute_user_variables_undefined_variable_throws(self): input_text = "{var1} sat on {var2}." user_ns = {"var1": "The cat"} formatter = VvpFormatter(input_text, user_ns) with self.assertRaises(NonExistentVariableException) as exception: formatter.substitute_user_variables() assert exception.exception.variable_name == "var2" def test_substitute_user_variables_ambiguous_throws(self): input_text = "{var1} sat on {{var2}." user_ns = {"var1": "The cat"} formatter = VvpFormatter(input_text, user_ns) with self.assertRaises(VariableSyntaxException) as exception: formatter.substitute_user_variables() assert exception.exception.bad_text == "{{var2}" def test_prepare_escaped_variables_works_in_simple_case(self): input_text = "{{ variable }} and {{ another }} with { ignore }" expected = "{{{{ variable }}}} and {{{{ another }}}} with { ignore }" assert VvpFormatter._prepare_escaped_variables(input_text) == expected def test_prepare_escaped_variables_throws_in_ambiguous_case(self): input_text = "{{ good }} and {also_good} and {{bad_because_no_spaces}}" user_ns = {"also_good": "dummy_value"} formatter = VvpFormatter(input_text, user_ns) with self.assertRaises(VariableSyntaxException) as exception: formatter.substitute_user_variables() assert exception.exception.bad_text == "{{bad_because_no_spaces}" def test_substitute_variables_works_in_simple_case(self): input_text = "{var1} sat on {var2}." escaped_text = input_text user_ns = {"var1": "The cat", "var2": "the mat"} formatter = VvpFormatter(input_text, user_ns) formatted = formatter._substitute_variables(escaped_text) assert formatted == "The cat sat on the mat." def test_substitute_variables_four_braces_transformed_to_two(self): input_text = "{var1} sat on {{ sittingObject }}." escaped_text = "{var1} sat on {{{{ sittingObject }}}}." user_ns = {"var1": "The cat"} formatter = VvpFormatter(input_text, user_ns) formatted = formatter._substitute_variables(escaped_text) assert formatted == "The cat sat on {{ sittingObject }}." 
def test_get_ambiguous_syntax_returns_nothing_if_correct(self): input_text = "{good} and {{ good }}" assert VvpFormatter._get_ambiguous_syntax(input_text) is None def test_get_ambiguous_syntax_finds_missing_spaces(self): test_data = { "{{myvar}}": "{{myvar}", # missing space { "{{myvar": "{{myvar", # missing space; no closing brace match "myvar}}": "myvar}}", # missing space } "{ { myvar}}": "{ myvar}}", # only get up to next brace back "{{ myvar}}": "{ myvar}}", # same even if double braces "{ {{ myvar}}": "{ myvar}}" # matches missing spaces before nesting } for test_input in test_data.keys(): assert VvpFormatter._get_ambiguous_syntax(test_input) == test_data[test_input] def test_get_ambiguous_syntax_does_not_parse_inside_brackets(self): test_data = { "{{ myvar }}": None, "{{ myvar myvar2 }}": None, } for test_input in test_data.keys(): assert VvpFormatter._get_ambiguous_syntax(test_input) == test_data[test_input] def test_get_ambiguous_syntax_finds_multiple_braces(self): input_text = "{{{ myvar }}}" assert VvpFormatter._get_ambiguous_syntax(input_text) == "{{{ myvar }" def test_get_ambiguous_syntax_finds_nesting(self): test_data = { "{ {myvar} }": "{ {myvar}", "{{ {myvar } }}": "{ {myvar }" # inside double braces not parsed, but nesting detected } for input_data in test_data.keys(): assert VvpFormatter._get_ambiguous_syntax(input_data) == test_data[input_data]
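# The quadruple-brace expectations in the tests above come straight from str.format
# brace escaping ("{{" and "}}" render as literal braces). A tiny standalone
# illustration using plain str.format, not the VvpFormatter API itself:
template = "INSERT INTO {{{{ namespace }}}}_{resultsTable}"
print(template.format(resultsTable="table1"))
# -> INSERT INTO {{ namespace }}_table1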
py
1a4458923036336c2ce3da0585f2abc64263986f
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import os import pathlib import typing from multiprocessing.pool import ThreadPool import requests CPU_FRACTION = 0.5 WORKER_THREADS = max(int(os.cpu_count() * CPU_FRACTION), 1) def download_item(source_target: typing.Tuple[str, pathlib.Path]): """ThreadPool.imap_unordered accepts tuples as arguments to the callable""" source_url, download_path = source_target if not os.path.exists(download_path): r = requests.get(source_url, stream=True) if r.status_code == 200: with open(download_path, "wb") as f: for chunk in r: f.write(chunk) def download_parallel(source_targets: typing.List[typing.Tuple[str, pathlib.Path]]): ThreadPool(WORKER_THREADS).imap_unordered(download_item, source_targets) def main(csv_path: pathlib.Path, source_column: str, download_prefix: str): with open(csv_path) as csv_file: csv_reader = csv.DictReader(csv_file, delimiter=",") download_dir = pathlib.Path(download_prefix) row_num = 0 source_targets = [] for row in csv_reader: # Example: # https://covidtracking.com/screenshots/AL/AL-20210307-230802.png source_url = row[source_column] state, filename = row[source_column].split("/")[-2:] (download_dir / state).mkdir(parents=True, exist_ok=True) source_targets.append((source_url, download_dir / state / filename)) row_num += 1 if row_num % WORKER_THREADS == 0: download_parallel(source_targets) source_targets = [] download_parallel(source_targets) if __name__ == "__main__": assert os.environ["CSV_PATH"] assert os.environ["SOURCE_COLUMN"] assert os.environ["DOWNLOAD_PREFIX"] main( csv_path=pathlib.Path(os.environ["CSV_PATH"]).expanduser(), source_column=os.environ["SOURCE_COLUMN"], download_prefix=os.environ["DOWNLOAD_PREFIX"], )
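# download_parallel above dispatches work through ThreadPool.imap_unordered but
# never consumes the returned iterator or joins the pool, so the interpreter can
# exit while downloads are still in flight. A hedged sketch of a blocking variant
# built on the same download_item helper and WORKER_THREADS constant:
def download_parallel_blocking(
    source_targets: typing.List[typing.Tuple[str, pathlib.Path]]
) -> None:
    with ThreadPool(WORKER_THREADS) as pool:
        # map() blocks until every download has finished (or raised).
        pool.map(download_item, source_targets)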
py
1a4458ab8eb86e254dbeb3657ce77bc1837b883e
from django import forms

from api.models import JournalEntry, JournalEntryLine, Period, Account


class NewJournalEntryForm(forms.ModelForm):
    period = forms.ModelChoiceField(
        queryset=Period.objects.all(), required=True, to_field_name="slug")

    class Meta:
        model = JournalEntry
        fields = (
            'period', 'date', 'memo', 'is_adjusting_entry', 'is_closing_entry',)


class NewJournalEntryLineForm(forms.ModelForm):
    account = forms.ModelChoiceField(
        queryset=Account.objects.all(), required=True, to_field_name="slug")

    class Meta:
        model = JournalEntryLine
        fields = ('account', 'type', 'amount',)
py
1a44593314effff83e470db623a1ae6817f47703
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-11-28 15:27
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('config', '0003_auto_20191127_2223'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sidebar',
            name='status',
            field=models.PositiveIntegerField(choices=[(1, '展示'), (0, '隐藏')], default=1, verbose_name='状态'),
        ),
    ]
py
1a445a11f4aa9ec7aa8ab6477a68eef73d331c17
import sys from datetime import datetime from awsglue.transforms import * from awsglue.utils import getResolvedOptions from awsglue.context import GlueContext from awsglue.dynamicframe import DynamicFrame from awsglue.job import Job from pyspark.sql.functions import * from pyspark.context import SparkContext from pyspark.sql import SparkSession from pyspark.sql.types import StringType #sc = SparkContext() sc = SparkContext.getOrCreate() sc.setLogLevel("INFO") glueContext = GlueContext(sc) job = Job(glueContext) args = getResolvedOptions(sys.argv, ['JOB_NAME', 'database_name', 'raw_pose_data_table', 'redshift_conn', 'redshift_role']) job.init(args['JOB_NAME'], args) print("Database: {}".format(args['database_name'])) print("Raw Events Table: {}".format(args['raw_pose_data_table'])) # catalog: database and table names db_name = args['database_name'] raw_pose_data_table = args['raw_pose_data_table'] # Output location redshift_role = args['redshift_role'] redshift_conn = args['redshift_conn'] redshift_preaction_query = "CREATE TABLE IF NOT EXISTS public.pose_data (msg_id VARCHAR(36),camera_location VARCHAR(20),msg_type VARCHAR(20),identified_action VARCHAR(40),event_time TIMESTAMP,event_time_qs VARCHAR(20),person_count SMALLINT,s3uri VARCHAR(150));" redshift_options = { "dbtable": "pose_data", "database": "default_db", "aws_iam_role": redshift_role, "preactions": redshift_preaction_query, "extracopyoptions": "COMPUPDATE ON" } # Helper Function replaces the timestamp into Redshift-compliant format def applyTransform(rec): rec["event_time"] = datetime.utcfromtimestamp(rec["timestamp"]).strftime("%m %d, %Y %H:%M:%S") rec["event_time_qs"] = datetime.utcfromtimestamp(rec["timestamp"]).strftime("%Y-%m-%d %H:%M:%S") return rec # Create dynamic frame from the source tables raw_pose_data = glueContext.create_dynamic_frame.from_catalog( database=db_name, table_name=raw_pose_data_table, # transformation_ctx = "events" ) print("---- Raw data schema: ----") raw_pose_data.printSchema() # Drop the pose field pose_dropped = raw_pose_data.drop_fields(paths=["pose", "year", "month", "day", "hour"], transformation_ctx="drop_pose") # Rename some fields to avoid Postgres reserved column name loc_renamed_df = pose_dropped.rename_field("location", "camera_location", transformation_ctx="rename_location") act_renamed_df = loc_renamed_df.rename_field("action", "identified_action", transformation_ctx="rename_action") # Maps a transformation function over each record to change timestamp from epoch to redshift-compliant format transformed_pose_data = Map.apply(frame = act_renamed_df, f = applyTransform) final_pose_data = transformed_pose_data.drop_fields(paths=["timestamp"], transformation_ctx="drop_timestamp") print("---- Processed data schema: ----") final_pose_data.printSchema() record_count = final_pose_data.count() print("Processed record count: {}".format(record_count)) # Avoid errors if Glue Job Bookmark detects no new data to process and records = 0. if record_count > 0: glueContext.write_dynamic_frame.from_jdbc_conf( frame=final_pose_data, catalog_connection=redshift_conn, connection_options=redshift_options, redshift_tmp_dir=args["TempDir"]) else: print("Glue Job Bookmark detected no new files to process") job.commit()
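# applyTransform above only rewrites the epoch timestamp into two string formats.
# A standalone illustration with an invented record (1609459200 is
# 2021-01-01T00:00:00Z):
from datetime import datetime

record = {"msg_id": "abc-123", "timestamp": 1609459200}
record["event_time"] = datetime.utcfromtimestamp(record["timestamp"]).strftime("%m %d, %Y %H:%M:%S")
record["event_time_qs"] = datetime.utcfromtimestamp(record["timestamp"]).strftime("%Y-%m-%d %H:%M:%S")
print(record["event_time"])     # 01 01, 2021 00:00:00
print(record["event_time_qs"])  # 2021-01-01 00:00:00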
py
1a445a25cc61edf6ba07663a70d4676a3e06bce6
""" https://github.com/tomchristie/django-rest-framework/issues/944 """ import re first_cap_re = re.compile('(.)([A-Z][a-z]+)') all_cap_re = re.compile('([a-z0-9])([A-Z])') def camelcase_to_underscore(name): s1 = first_cap_re.sub(r'\1_\2', name) return all_cap_re.sub(r'\1_\2', s1).lower() def underscore_to_camelcase(name, lower_first=True): result = ''.join(char.capitalize() for char in name.split('_')) if lower_first: return result[0].lower() + result[1:] else: return result def recursive_key_map(function, data): if isinstance(data, dict): new_dict = {} for key, value in data.items(): if isinstance(key, str): new_key = function(key) new_dict[new_key] = recursive_key_map(function, value) return new_dict elif isinstance(data, (list, tuple)): return [recursive_key_map(function, value) for value in data] else: return data
py
1a445abf2959176e268aa3d953f8223fa7efb5d6
""" This file offers the methods to automatically retrieve the graph Vibrio palustris. The graph is automatically retrieved from the STRING repository. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen import Graph # pylint: disable=import-error def VibrioPalustris( directed: bool = False, preprocess: bool = True, load_nodes: bool = True, verbose: int = 2, cache: bool = True, cache_path: str = "graphs/string", version: str = "links.v11.5", **additional_graph_kwargs: Dict ) -> Graph: """Return new instance of the Vibrio palustris graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False Wether to load the graph as directed or undirected. By default false. preprocess: bool = True Whether to preprocess the graph to be loaded in optimal time and memory. load_nodes: bool = True, Whether to load the nodes vocabulary or treat the nodes simply as a numeric range. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache: bool = True Whether to use cache, i.e. download files only once and preprocess them only once. cache_path: str = "graphs" Where to store the downloaded graphs. version: str = "links.v11.5" The version of the graph to retrieve. The available versions are: - homology.v11.5 - physical.links.v11.5 - links.v11.5 additional_graph_kwargs: Dict Additional graph kwargs. Returns ----------------------- Instace of Vibrio palustris graph. References --------------------- Please cite the following if you use the data: ```bib @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } ``` """ return AutomaticallyRetrievedGraph( graph_name="VibrioPalustris", repository="string", version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
py
1a445ac21ac218f57cdfc900285965379732b6a7
# (C) Copyright 2020 ECMWF. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. # In applying this licence, ECMWF does not waive the privileges and immunities # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. # import logging import re import requests from .heuristics import Part, parts_heuristics LOG = logging.getLogger(__name__) # S3 does not support multiple ranges class S3Streamer: def __init__(self, url, request, parts, headers, **kwargs): self.url = url self.parts = parts self.request = request self.headers = dict(**headers) self.kwargs = kwargs def __call__(self, chunk_size): # See https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html headers = dict(**self.headers) # TODO: add assertions for i, part in enumerate(self.parts): if i == 0: request = self.request else: offset, length = part headers["range"] = f"bytes={offset}-{offset+length-1}" request = requests.get( self.url, stream=True, headers=headers, **self.kwargs, ) try: request.raise_for_status() except Exception: LOG.error("URL %s: %s", self.url, request.text) raise header = request.headers bytes = header["content-range"] LOG.debug("HEADERS %s", header) m = re.match(r"^bytes (\d+)d?-(\d+)d?/(\d+)d?$", bytes) assert m, header start, end, total = int(m.group(1)), int(m.group(2)), int(m.group(3)) assert end >= start assert start < total assert end < total assert start == part.offset, (bytes, part) # (end + 1 == total) means that we overshoot the end of the file, # this happens when we round transfer blocks assert (end == part.offset + part.length - 1) or (end + 1 == total), ( bytes, part, ) yield from request.iter_content(chunk_size) class MultiPartStreamer: def __init__(self, url, request, parts, boundary, **kwargs): self.request = request self.size = int(request.headers["content-length"]) self.encoding = "utf-8" self.parts = parts self.boundary = boundary def __call__(self, chunk_size): from email.parser import HeaderParser from requests.structures import CaseInsensitiveDict header_parser = HeaderParser() marker = f"--{self.boundary}\r\n".encode(self.encoding) end_header = b"\r\n\r\n" end_data = b"\r\n" end_of_input = f"--{self.boundary}--\r\n".encode(self.encoding) if chunk_size < len(end_data): chunk_size = len(end_data) iter_content = self.request.iter_content(chunk_size) chunk = next(iter_content) # Some servers start with \r\n if chunk[:2] == end_data: chunk = chunk[2:] LOG.debug("MARKER %s", marker) part = 0 while True: while len(chunk) < max(len(marker), len(end_of_input)): more = next(iter_content) assert more is not None chunk += more if chunk.find(end_of_input) == 0: assert part == len(self.parts) break pos = chunk.find(marker) assert pos == 0, (pos, chunk) chunk = chunk[pos + len(marker) :] while True: pos = chunk.find(end_header) if pos != -1: break more = next(iter_content) assert more is not None chunk += more assert len(chunk) < 1024 * 1024 pos += len(end_header) header = chunk[:pos].decode(self.encoding) header = CaseInsensitiveDict(header_parser.parsestr(header)) chunk = chunk[pos:] # kind = header["content-type"] bytes = header["content-range"] LOG.debug("HEADERS %s", header) m = re.match(r"^bytes (\d+)d?-(\d+)d?/(\d+)d?$", bytes) assert m, header start, end, total = int(m.group(1)), int(m.group(2)), int(m.group(3)) assert end >= start assert start < total assert end < total size = end - start + 1 assert start == 
self.parts[part].offset # (end + 1 == total) means that we overshoot the end of the file, # this happens when we round transfer blocks assert (end == self.parts[part].offset + self.parts[part].length - 1) or ( end + 1 == total ), (bytes, self.parts[part]) while size > 0: if len(chunk) >= size: yield chunk[:size] chunk = chunk[size:] size = 0 else: yield chunk size -= len(chunk) chunk = next(iter_content) assert chunk.find(end_data) == 0 chunk = chunk[len(end_data) :] part += 1 class DecodeMultipart: def __init__(self, url, request, parts, **kwargs): self.request = request assert request.status_code == 206, request.status_code content_type = request.headers["content-type"] if content_type.startswith("multipart/byteranges; boundary="): _, boundary = content_type.split("=") # print("****** MULTI-PART supported by server", url) self.streamer = MultiPartStreamer(url, request, parts, boundary, **kwargs) else: # print("****** MULTI-PART *NOT* supported by server", url) self.streamer = S3Streamer(url, request, parts, **kwargs) def __call__(self, chunk_size): return self.streamer(chunk_size) class PartFilter: def __init__(self, parts, positions=None): self.parts = parts if positions is None: positions = [x.offset for x in parts] self.positions = positions assert len(self.parts) == len(self.positions) def __call__(self, streamer): def execute(chunk_size): stream = streamer(chunk_size) chunk = next(stream) pos = 0 for (_, length), offset in zip(self.parts, self.positions): offset -= pos while offset > len(chunk): pos += len(chunk) offset -= len(chunk) chunk = next(stream) assert chunk chunk = chunk[offset:] pos += offset size = length while size > 0: if len(chunk) >= size: yield chunk[:size] chunk = chunk[size:] pos += size size = 0 else: yield chunk size -= len(chunk) pos += len(chunk) chunk = next(stream) # Drain stream, so we don't created error messages in the server's logs while True: try: next(stream) except StopIteration: break return execute def compress_parts(parts): last = -1 result = [] # Compress and check for offset, length in parts: assert offset >= 0 and length > 0 assert offset >= last, ( f"Offsets and lengths must be in order, and not overlapping:" f" offset={offset}, end of previous part={last}" ) if offset == last: # Compress offset, prev_length = result.pop() length += prev_length result.append((offset, length)) last = offset + length return tuple(Part(offset, length) for offset, length in result) def compute_byte_ranges(parts, method, url, statistics_gatherer): if callable(method): blocks = method(parts) else: blocks = parts_heuristics(method, statistics_gatherer)(parts) blocks = compress_parts(blocks) assert len(blocks) > 0 assert len(blocks) <= len(parts) statistics_gatherer( "byte-ranges", method=str(method), url=url, parts=parts, blocks=blocks, ) i = 0 positions = [] block_offset, block_length = blocks[i] for offset, length in parts: while offset > block_offset + block_length: i += 1 block_offset, block_length = blocks[i] start = i while offset + length > block_offset + block_length: i += 1 block_offset, block_length = blocks[i] end = i # Sanity check: assert that each parts is contain in a rounded part assert start == end positions.append( offset - blocks[i].offset + sum(blocks[j].length for j in range(i)) ) return blocks, positions
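# compress_parts above merges byte ranges that touch and insists the input is
# ordered and non-overlapping. A quick check of that behaviour (runnable when the
# package is importable, e.g. via "python -m"):
if __name__ == "__main__":
    merged = compress_parts([(0, 10), (10, 5), (20, 5)])
    # the first two ranges touch, so they collapse into a single Part
    assert merged[0].offset == 0 and merged[0].length == 15
    assert merged[1].offset == 20 and merged[1].length == 5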
py
1a445b0ab16526d0b8e13b0b9d8cf239ecde344e
import datetime import gc import numpy as np import os import pandas as pd os.environ['KMP_DUPLICATE_LIB_OK']='True' # MacOS fix for libomp issues (https://github.com/dmlc/xgboost/issues/1715) import lightgbm as lgb import xgboost as xgb from sklearn.metrics import log_loss, roc_auc_score from sklearn.model_selection import KFold, RepeatedKFold, GroupKFold, StratifiedKFold from sklearn.decomposition import PCA from sklearn.preprocessing import LabelEncoder from sklearn.svm import NuSVC from tqdm import tqdm as tqdm from kinoa import kinoa from scipy.stats import ttest_ind, ks_2samp def dprint(*args, **kwargs): print("[{}] ".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")) + \ " ".join(map(str,args)), **kwargs) dprint('PID: {}'.format(os.getpid())) script_id = 0 data_path = '../input/' id_col = 'encounter_id' target_col = 'hospital_death' fillna_with_est = False train_lgbm = True train_xgb = False # train_catboost = False train = pd.read_csv(os.path.join(data_path, 'training_v2.csv')) test = pd.read_csv(os.path.join(data_path, 'unlabeled.csv')) # Drop constant columns constant_cols = [] for c in train.columns: if train[c].nunique(dropna=False) < 2: constant_cols.append(c) print(f'constant_cols in train: {constant_cols}') train.drop(constant_cols, axis=1, inplace=True) test.drop(constant_cols, axis=1, inplace=True) constant_cols = [] for c in test.columns: if c != target_col and test[c].nunique(dropna=False) < 2: constant_cols.append(c) print(f'constant_cols in test: {constant_cols}') train.drop(constant_cols, axis=1, inplace=True) test.drop(constant_cols, axis=1, inplace=True) # Add estimated variables to the dataset est_cols = [ { 'name': 'weight', 'fillna': False, }, { 'name': 'height', 'fillna': False, }, { 'name': 'apache_4a_hospital_death_prob', 'fillna': False, }, # { # 'name': 'apache_4a_icu_death_prob', # 'fillna': False, # }, # Worse # { # 'name': 'urineoutput_apache', # 'fillna': False, # }, # Worse # { # 'name': 'bmi', # 'fillna': False, # }, # Worse # { # 'name': 'glucose_apache', # 'fillna': False, # }, # Worse ] for c in est_cols: df = pd.read_csv(f'{c["name"]}_est.csv') train = train.merge(df, on=id_col, how='left') test = test.merge(df, on=id_col, how='left') if c['fillna']: train.loc[train[c['name']].isnull(), c['name']] = train[c['name'] + '_est'] test.loc[test[c['name']].isnull(), c['name']] = test[c['name'] + '_est'] train.drop([c['name'] + '_est'], axis=1, inplace=True) test.drop([c['name'] + '_est'], axis=1, inplace=True) dprint(train.shape, test.shape) # Extract features def extract_features(df): df['d1_temp_minmax'] = df['d1_temp_max'] - df['d1_temp_min'] df['d1_glucose_minmax'] = df['d1_glucose_max'] - df['d1_glucose_min'] df['d1_resprate_minmax'] = df['d1_resprate_max'] - df['d1_resprate_min'] df['d1_spo2_minmax'] = df['d1_spo2_max'] - df['d1_spo2_min'] df['d1_platelets_minmax'] = df['d1_platelets_max'] - df['d1_platelets_min'] # df['d1_heartrate_minmax'] = df['d1_heartrate_max'] - df['d1_heartrate_min'] # df['h1_heartrate_minmax'] = df['h1_heartrate_max'] - df['h1_heartrate_min'] # df['h1_temp_minmax'] = df['h1_temp_max'] - df['h1_temp_min'] # df['h1_glucose_minmax'] = df['h1_glucose_max'] - df['h1_glucose_min'] # df['h1_resprate_minmax'] = df['h1_resprate_max'] - df['h1_resprate_min'] # df['h1_spo2_minmax'] = df['h1_spo2_max'] - df['h1_spo2_min'] # df['h1_platelets_minmax'] = df['h1_platelets_max'] - df['h1_platelets_min'] # df['abmi'] = df['age']*100*100*df['weight']/df['height']/df['height'] df['apache_4a_hospicu_death_prob'] = 
df['apache_4a_hospital_death_prob'] + df['apache_4a_icu_death_prob'] # df['apache_4a_hospicu_death_prob_m'] = df['apache_4a_hospital_death_prob'] * df['apache_4a_icu_death_prob'] df['age_group'] = df['age']//5 df['weight_group'] = df['weight']//5 # df['hr_a'] = df['d1_heartrate_max']/df['age'] # df['hr_w'] = df['d1_heartrate_max']/df['weight'] if fillna_with_est: df['bmi'] = 100*100*df['weight']/df['height']/df['height'] else: df['bmi_w_est'] = 100*100*df['weight_est']/df['height']/df['height'] df['bmi_h_est'] = 100*100*df['weight']/df['height_est']/df['height_est'] df['bmi_wh_est'] = 100*100*df['weight_est']/df['height_est']/df['height_est'] # df['agi'] = df['weight']/df['age'] # df['hrw'] = df['d1_heartrate_max']/df['weight'] # cols = ['temp_apache', 'd1_temp_max', 'd1_temp_min', 'h1_temp_max', 'h1_temp_min'] # for c in cols: # df[c] = df[c]/36.6 pass extract_features(train) extract_features(test) train['is_test'] = 0 test['is_test'] = 1 df_all = pd.concat([train, test], axis=0) dprint('Label Encoder...') cols = [f_ for f_ in df_all.columns if df_all[f_].dtype == 'object'] print(cols) cnt = 0 for c in tqdm(cols): if c != id_col: # print(c) le = LabelEncoder() df_all[c] = le.fit_transform(df_all[c].astype(str)) cnt += 1 del le dprint('len(cols) = {}'.format(cnt)) gfs = ['hospital_id', 'icu_id', 'age_group', 'apache_3j_diagnosis', 'gender', 'ethnicity', 'apache_3j_bodysystem'] #+ \ # ['hospital_admit_source', 'icu_admit_source', 'icu_stay_type', 'icu_type', 'apache_2_bodysystem'] ffs = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi'] # ffs = ['apache_4a_hospital_death_prob', 'apache_4a_icu_death_prob', 'bmi', 'bmi_w_est', 'bmi_h_est', 'bmi_wh_est', 'weight', 'height'] for gf in gfs: for ff in ffs: g = df_all.groupby(gf)[ff].agg(['mean', 'std', 'min', 'max']).reset_index() g.rename({'mean': f'{gf}_{ff}__mean', 'std': f'{gf}_{ff}__std', 'min': f'{gf}_{ff}__min', 'max': f'{gf}_{ff}__max'}, axis=1, inplace=True) df_all = df_all.merge(g, on=gf, how='left') train = df_all.loc[df_all['is_test'] == 0].drop(['is_test'], axis=1) test = df_all.loc[df_all['is_test'] == 1].drop(['is_test'], axis=1) del df_all gc.collect() features = list(train.columns.values) features.remove(id_col) features.remove(target_col) # Build the model cnt = 0 p_buf = [] n_splits = 4 n_repeats = 1 kf = RepeatedKFold( n_splits=n_splits, n_repeats=n_repeats, random_state=0) err_buf = [] undersampling = 0 lgb_params = { 'boosting_type': 'gbdt', 'objective': 'binary', 'metric': 'auc', 'max_depth': 8, 'learning_rate': 0.05, 'feature_fraction': 0.85, 'bagging_fraction': 0.85, 'bagging_freq': 5, 'lambda_l1': 1.0, 'lambda_l2': 10.0, 'verbose': -1, 'num_threads': 4, } xgb_params = { 'max_depth': 8, 'eta': 0.05, 'objective': 'binary:logistic', 'subsample': 0.85, 'colsample_bytree': 0.85, 'alpha': 1, 'lambda': 1, 'eval_metric': 'auc', 'nthread': 4, } cols_to_drop = [ id_col, target_col, # 'patient_id', ] # cols_to_use = features X = train.drop(cols_to_drop, axis=1, errors='ignore') y = train[target_col].values X_test = test.drop(cols_to_drop, axis=1, errors='ignore') id_test = test[id_col].values # # Feature selection # cols_to_drop = [] # for c in X.columns: # # t = ttest_ind( # # X[c].fillna(X[c].mean()), # # X_test[c].fillna(X_test[c].mean())) # t = ttest_ind( # X[c].dropna(), # X_test[c].dropna()) # # print(c, t) # if t[1] < 0.001: # print(c, t) # cols_to_drop.append(c) # print(f'Dropping after statistical tests: {cols_to_drop}') # X = X.drop(cols_to_drop, axis=1, errors='ignore') # X_test = 
X_test.drop(cols_to_drop, axis=1, errors='ignore') n_features = X.shape[1] dprint(f'n_features: {n_features}') p_test = [] for fold_i, (train_index, valid_index) in enumerate(kf.split(X, y)): x_train = X.iloc[train_index] x_valid = X.iloc[valid_index] y_train = y[train_index] y_valid = y[valid_index] x_test = X_test.copy() # # Frequency encoding # encoding = x_train.groupby('height').size() # encoding = encoding/len(x_train) # x_train['height_fenc'] = x_train['height'].map(encoding) # x_valid['height_fenc'] = x_valid['height'].map(encoding) # x_test['height_fenc'] = x_test['height'].map(encoding) feature_names = list(x_train.columns) p_valid = [] # LGBM if train_lgbm: params = lgb_params.copy() # pca = PCA(n_components=144) # x_train = pca.fit_transform(x_train) # x_valid = pca.transform(x_valid) # x_test_pca = pca.transform(x_test) # feature_names = ['pca_{}'.format(i) for i in range(x_train.shape[1])] lgb_train = lgb.Dataset( x_train, y_train, feature_name=feature_names, ) lgb_train.raw_data = None lgb_valid = lgb.Dataset( x_valid, y_valid, ) lgb_valid.raw_data = None model = lgb.train( params, lgb_train, num_boost_round=5000, valid_sets=[lgb_valid], early_stopping_rounds=100, verbose_eval=100, ) if fold_i == 0: importance = model.feature_importance() model_fnames = model.feature_name() tuples = sorted(zip(model_fnames, importance), key=lambda x: x[1])[::-1] tuples = [x for x in tuples if x[1] > 0] print('Important features:') for i in range(20): if i < len(tuples): print(tuples[i]) else: break del importance, model_fnames, tuples p_lgbm = model.predict(x_valid, num_iteration=model.best_iteration) p_valid.append(p_lgbm) err = roc_auc_score(y_valid, p_lgbm) # err_buf.append(err) dprint('{} LGBM AUC: {:.4f}'.format(fold_i, err)) p_lgbm_test = model.predict(x_test[feature_names], num_iteration=model.best_iteration) p_test.append(p_lgbm_test) # XGB if train_xgb: params = xgb_params.copy() dtrain = xgb.DMatrix(x_train, label=y_train) dvalid = xgb.DMatrix(x_valid, label=y_valid) dtest = xgb.DMatrix(x_test[feature_names]) evallist = [(dvalid, 'eval')] bst = xgb.train( params, dtrain, 5000, evallist, early_stopping_rounds=100, verbose_eval=100 ) p_xgb = bst.predict(dvalid, ntree_limit=bst.best_iteration) p_valid.append(p_xgb) err = roc_auc_score(y_valid, p_xgb) # err_buf.append(err) dprint('{} XGB AUC: {:.4f}'.format(fold_i, err)) p_xgb_test = bst.predict(dtest, ntree_limit=bst.best_iteration) p_test.append(p_xgb_test) # Ensemble evaluation if len(p_valid) > 1: p_ens = np.mean(p_valid, axis=0) err = roc_auc_score(y[valid_index], p_ens) dprint('{} ENS AUC: {:.4f}'.format(fold_i, err)) err_buf.append(err) # x_train = X.iloc[train_index] # x_valid = X.iloc[valid_index] # model = NuSVC( # probability=True, # kernel='poly', # degree=4, # gamma='auto', # random_state=0, # nu=0.6, # coef0=0.05) # model.fit(x_train, y[train_index]) # p_nusvc = model.predict_proba(x_valid)[:, 1] # err = roc_auc_score(y[valid_index], p_nusvc) # print('{} {} NuSVC AUC: {}'.format(v, cnt + 1, err)) # p_nusvc_test = model.predict_proba(x_test)[:, 1] # p_mean = 0.1*p_lgbm + 0.9*p_nusvc # err = roc_auc_score(y[valid_index], p_mean) # print('{} {} ENS AUC: {}'.format(v, cnt + 1, err)) # p = 0.1*p_lgbm_test + 0.9*p_nusvc_test del model, lgb_train, lgb_valid gc.collect # break err_mean = np.mean(err_buf) err_std = np.std(err_buf) dprint('AUC: {:.4f} +/- {:.4f}'.format(err_mean, err_std)) test_preds = np.mean(p_test, axis=0) submission = pd.DataFrame() submission[id_col] = id_test submission[target_col] = test_preds 
submission.to_csv('submission{}.csv'.format(script_id), index=False) # Save backup files = [ 'model{}.py'.format(script_id), 'model{}.log'.format(script_id), 'submission{}.csv'.format(script_id), # 'feature_importance{}.txt'.format(script_id), # 'train_weights{}.csv'.format(script_id), ] experiment_name = 'Exp{}'.format(script_id) params = {} params['n_models'] = cnt scores = {} scores['auc_mean'] = err_mean scores['auc_std'] = err_std scores['kaggle'] = np.nan other = {} other['n_features'] = n_features other['n_splits'] = n_splits comments = '' kinoa.save( files, experiment_name=experiment_name, params=params, scores=scores, other=other, comments=comments, working_dir='', sort_log_by='experiment_datetime', sort_log_ascending=True, columns_order={'scores.kaggle': -1, 'scores.auc_std': -2, 'scores.auc_mean': -3} ) dprint('Done!')
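# The gf/ff loop above attaches per-group statistics of selected features back to
# every row before training. A toy standalone rendition of the same pattern (the
# numbers are invented; hospital_id and bmi are columns the script actually uses):
toy_df = pd.DataFrame({
    "hospital_id": [1, 1, 2, 2, 2],
    "bmi": [20.0, 30.0, 24.0, 27.0, 21.0],
})
toy_stats = toy_df.groupby("hospital_id")["bmi"].agg(["mean", "std", "min", "max"]).reset_index()
toy_stats = toy_stats.rename(columns={
    "mean": "hospital_id_bmi__mean", "std": "hospital_id_bmi__std",
    "min": "hospital_id_bmi__min", "max": "hospital_id_bmi__max",
})
toy_df = toy_df.merge(toy_stats, on="hospital_id", how="left")
print(toy_df[["hospital_id", "bmi", "hospital_id_bmi__mean"]])
# rows with hospital_id == 1 get mean 25.0, rows with hospital_id == 2 get mean 24.0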
py
1a445bd52d5dab35f277587c3ab52c17aa65bd27
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Sets the IAM policy for the repository.""" from googlecloudsdk.api_lib.source.repos import sourcerepo from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.iam import iam_util @base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA) class SetIamPolicy(base.UpdateCommand): """Set the IAM policy for the named repository. This command sets the IAM policy for the given repository from the policy in the provided file. ## EXAMPLES To set the IAM policy, issue the following command: $ {command} REPOSITORY_NAME POLICY_FILE """ @staticmethod def Args(parser): parser.add_argument( 'name', metavar='REPOSITORY_NAME', help='Name of the repository.') parser.add_argument( 'policy_file', help=('JSON or YAML file with IAM policy. ' 'See https://cloud.google.com/resource-manager/' 'reference/rest/Shared.Types/Policy')) parser.display_info.AddFormat('default') def Run(self, args): """Sets the IAM policy for the repository. Args: args: argparse.Namespace, the arguments this command is run with. Returns: (sourcerepo_v1_messsages.Policy) The IAM policy. Raises: ToolException: on project initialization errors. """ res = sourcerepo.ParseRepo(args.name) source = sourcerepo.Source() policy = iam_util.ParseYamlorJsonPolicyFile(args.policy_file, source.messages.Policy) result = source.SetIamPolicy(res, policy) iam_util.LogSetIamPolicy(res.Name(), 'repo') return result
py
1a445c7c51b1df71fad7e08991b176f57ba3dff2
# -*- coding: utf-8 -*-
# Copyright (c) 2015, indictrans and contributors
# For license information, please see license.txt

from __future__ import unicode_literals

import frappe
from frappe.model.document import Document


class Standard(Document):
    pass
py
1a445ca6f37cd4d17a568d56c7337c882a76710d
import deepdanbooru.model.layers
import deepdanbooru.model.losses

from .resnet import create_resnet_152
from .resnet import create_resnet_custom_v1
from .resnet import create_resnet_custom_v2
from .resnet import create_resnet_custom_v3
py
1a445d3d15930aae9bbac97556a2aaad60e21d07
""" Twitter credentials """ consumer_key = "" consumer_secret = "" access_token = "" access_token_secret = ""
py
1a445e9f3575f081a565b7009e58b2d363b2ec22
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2008-2019 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://trac.edgewall.org/log/. """functional_tests While unittests work well for testing facets of an implementation, they fail to provide assurances that the user-visible functions work in practice. Here, we complement the unittests with functional tests that drive the system as a user would to verify user visible functionality. These functional tests are run as part of the unittests. So, we use Twill to verify Trac's functionality as served by tracd (and in the future, other frontends). Unlike most unittests, we setup a single fixture against which we run all the testcases. This is for two reasons: Primarily, that provides us with a more complex set of data to test against and thus more room for triggering bugs. Secondarily, the cost of setting up a new Trac environment and Subversion repository is significant, so recreating the fixture for each test would be very costly. There are two primary objects involved in the testing, the FunctionalTestEnvironment and the FunctionalTester. FunctionalTestEnvironment represents the Trac environment, the Subversion repository, and the server. The server will be run on a random local port in the range 8000-8999. A subdirectory named 'tracenv' will be created containing the Trac environment, Subversion repository, and the user authentication information. An 'admin' user is created and given TRAC_ADMIN privs early in the testing. There are other users added as well. All accounts are setup with a password equalling the username. The test environment is left behind after the testing has completed to assist in debugging. FunctionalTester provides code reuse for the testcases to allow a higher-level description of the more complicated bugs. For example, creating a new ticket is the first step in regression testing many things, so FunctionalTester provides a create_ticket() method. That method is written as if it were itself a testcase for creating a ticket, so there is a testcase that simply calls that method, and other testcases that use it as a higher-level step don't have to worry about basic issues such as if the ticket was successfully created. Requirements: - Twill (http://twill.idyll.org/) - lxml for XHTML validation (optional) """ import os import unittest from pkg_resources import parse_version import trac # Handle missing twill so we can print a useful 'SKIP' # message. We import subprocess first to allow customizing it on Windows # to select pywin32 in favor of _subprocess for low-level calls. If Twill # is allowed to load first, its (unmodified) copy will always be loaded. import subprocess from trac.tests.functional.better_twill import b, tc, twill try: # This is the first indicator of whether the subversion bindings are # correctly installed. 
from svn import core has_svn = True except ImportError: has_svn = False from trac.test import TestSetup, TestCaseSetup internal_error = 'Trac detected an internal error:' trac_source_tree = os.path.normpath(os.path.join(trac.__file__, '..', '..')) if twill: from trac.tests.functional.testenv import FunctionalTestEnvironment from trac.tests.functional.svntestenv import SvnFunctionalTestEnvironment from trac.tests.functional.tester import FunctionalTester class FunctionalTestSuite(TestSetup): """TestSuite that provides a test fixture containing a FunctionalTestEnvironment and a FunctionalTester. """ if has_svn: env_class = SvnFunctionalTestEnvironment else: env_class = FunctionalTestEnvironment tester_class = FunctionalTester def __init__(self): if parse_version(twill.__version__) != parse_version('0.9'): raise ImportError("Twill 0.9 is required. Found version %s." % twill.__version__) super(FunctionalTestSuite, self).__init__() def setUp(self, port=None): """If no port is specified, use a semi-random port and subdirectory 'testenv'; but if a port is specified, use that port and subdirectory 'testenv<portnum>'. """ if port is None: try: port = int(os.getenv('TRAC_TEST_PORT')) except (TypeError, ValueError): pass env_path = os.getenv('TRAC_TEST_ENV_PATH') if not env_path: env_name = 'testenv%s' % (port or '') env_path = os.path.join(trac_source_tree, env_name) else: env_path += str(port or '') if port is None: port = 8000 + os.getpid() % 1000 baseurl = "http://127.0.0.1:%s" % port self._testenv = self.env_class(env_path, port, baseurl) # functional-testing.log gets the twill output self.functional_test_log = \ os.path.join(env_path, 'functional-testing.log') twill.set_output(open(self.functional_test_log, 'w')) self._testenv.start() self._tester = self.tester_class(baseurl) self.fixture = (self._testenv, self._tester) self._testenv.set_config('project', 'name', 'Functional Tests') def tearDown(self): self._testenv.stop() class FunctionalTestCaseSetup(TestCaseSetup): """Convenience class to expand the fixture into the _testenv and _tester attributes.""" def setUp(self): self._testenv, self._tester = self.fixture class FunctionalTwillTestCaseSetup(FunctionalTestCaseSetup): failureException = twill.errors.TwillAssertionError else: # We're going to have to skip the functional tests class FunctionalTestSuite(TestSetup): def __init__(self): raise ImportError("Twill not installed") class FunctionalTwillTestCaseSetup(object): pass class FunctionalTestCaseSetup(object): pass # Twill's find command accepts regexes; some convenient but complex regexes # & regex factories are provided here (only one so far): def regex_owned_by(username): return '(Owned by:(<[^>]*>|\\n| )*%s)' % username def functionalSuite(): suite = FunctionalTestSuite() return suite def test_suite(): try: suite = functionalSuite() import trac.tests.functional.testcases trac.tests.functional.testcases.functionalSuite(suite) import trac.versioncontrol.tests trac.versioncontrol.tests.functionalSuite(suite) import trac.ticket.tests trac.ticket.tests.functionalSuite(suite) import trac.mimeview.tests trac.mimeview.tests.functionalSuite(suite) import trac.prefs.tests trac.prefs.tests.functionalSuite(suite) import trac.wiki.tests trac.wiki.tests.functionalSuite(suite) import trac.timeline.tests trac.timeline.tests.functionalSuite(suite) import trac.admin.tests trac.admin.tests.functionalSuite(suite) import trac.search.tests trac.search.tests.functionalSuite(suite) # The db tests should be last since the backup test occurs there. 
import trac.db.tests trac.db.tests.functionalSuite(suite) except ImportError as e: print("SKIP: functional tests (%s)" % e) # No tests to run, provide an empty suite. suite = unittest.TestSuite() return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
py
1a445ecc8a9448d8d09b4e2ca99bbd4d3a3245b1
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._inputs import * __all__ = ['MyWorkbook'] class MyWorkbook(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, category: Optional[pulumi.Input[str]] = None, display_name: Optional[pulumi.Input[str]] = None, id: Optional[pulumi.Input[str]] = None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedIdentityArgs']]] = None, kind: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, resource_name_: Optional[pulumi.Input[str]] = None, serialized_data: Optional[pulumi.Input[str]] = None, source_id: Optional[pulumi.Input[str]] = None, storage_uri: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, type: Optional[pulumi.Input[str]] = None, version: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ An Application Insights private workbook definition. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] category: Workbook category, as defined by the user at creation time. :param pulumi.Input[str] display_name: The user-defined name of the private workbook. :param pulumi.Input[str] id: Azure resource Id :param pulumi.Input[pulumi.InputType['ManagedIdentityArgs']] identity: Identity used for BYOS :param pulumi.Input[str] kind: The kind of workbook. Choices are user and shared. :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] name: Azure resource name :param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive. :param pulumi.Input[str] resource_name_: The name of the Application Insights component resource. :param pulumi.Input[str] serialized_data: Configuration of this particular private workbook. Configuration data is a string containing valid JSON :param pulumi.Input[str] source_id: Optional resourceId for a source resource. :param pulumi.Input[str] storage_uri: BYOS Storage Account URI :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags :param pulumi.Input[str] type: Azure resource type :param pulumi.Input[str] version: This instance's version of the data model. This can change as new features are added that can be marked private workbook. 
""" if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if category is None: raise TypeError("Missing required property 'category'") __props__['category'] = category if display_name is None: raise TypeError("Missing required property 'display_name'") __props__['display_name'] = display_name __props__['id'] = id __props__['identity'] = identity __props__['kind'] = kind __props__['location'] = location __props__['name'] = name if resource_group_name is None: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name if resource_name_ is None: raise TypeError("Missing required property 'resource_name_'") __props__['resource_name'] = resource_name_ if serialized_data is None: raise TypeError("Missing required property 'serialized_data'") __props__['serialized_data'] = serialized_data __props__['source_id'] = source_id __props__['storage_uri'] = storage_uri __props__['tags'] = tags __props__['type'] = type __props__['version'] = version __props__['time_modified'] = None __props__['user_id'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:insights/latest:MyWorkbook"), pulumi.Alias(type_="azure-nextgen:insights/v20150501:MyWorkbook")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(MyWorkbook, __self__).__init__( 'azure-nextgen:insights/v20201020:MyWorkbook', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'MyWorkbook': """ Get an existing MyWorkbook resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return MyWorkbook(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def category(self) -> pulumi.Output[str]: """ Workbook category, as defined by the user at creation time. """ return pulumi.get(self, "category") @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Output[str]: """ The user-defined name of the private workbook. """ return pulumi.get(self, "display_name") @property @pulumi.getter def identity(self) -> pulumi.Output[Optional['outputs.ManagedIdentityResponse']]: """ Identity used for BYOS """ return pulumi.get(self, "identity") @property @pulumi.getter def kind(self) -> pulumi.Output[Optional[str]]: """ The kind of workbook. Choices are user and shared. 
""" return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ Resource location """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[Optional[str]]: """ Azure resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="serializedData") def serialized_data(self) -> pulumi.Output[str]: """ Configuration of this particular private workbook. Configuration data is a string containing valid JSON """ return pulumi.get(self, "serialized_data") @property @pulumi.getter(name="sourceId") def source_id(self) -> pulumi.Output[Optional[str]]: """ Optional resourceId for a source resource. """ return pulumi.get(self, "source_id") @property @pulumi.getter(name="storageUri") def storage_uri(self) -> pulumi.Output[Optional[str]]: """ BYOS Storage Account URI """ return pulumi.get(self, "storage_uri") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter(name="timeModified") def time_modified(self) -> pulumi.Output[str]: """ Date and time in UTC of the last modification that was made to this private workbook definition. """ return pulumi.get(self, "time_modified") @property @pulumi.getter def type(self) -> pulumi.Output[Optional[str]]: """ Azure resource type """ return pulumi.get(self, "type") @property @pulumi.getter(name="userId") def user_id(self) -> pulumi.Output[str]: """ Unique user id of the specific user that owns this private workbook. """ return pulumi.get(self, "user_id") @property @pulumi.getter def version(self) -> pulumi.Output[Optional[str]]: """ This instance's version of the data model. This can change as new features are added that can be marked private workbook. """ return pulumi.get(self, "version") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
py
1a445f1f4d4b4b8a4c67b013d34b3547ddbf9418
from .utils import *
from .QFunction import *

import torch
from torch import nn
import torch.nn.functional as F
from torch.distributions.normal import Normal


class MLP_SquashedGaussianActor(nn.Module):
    def __init__(self, observation_dim, action_dim, hidden_sizes, activation, act_limit):
        super().__init__()
        self.log_std_max = 2
        self.log_std_min = -20
        self.net = create_mlp([observation_dim] + list(hidden_sizes), activation, activation)
        self.mu_layer = nn.Linear(hidden_sizes[-1], action_dim)
        self.log_std_layer = nn.Linear(hidden_sizes[-1], action_dim)
        self.act_limit = act_limit

    def forward(self, observation, deterministic=False, with_log_prob=True):
        net_out = self.net(observation)
        # compute the \mu and \sigma of the gaussian
        mu = self.mu_layer(net_out)
        log_std = self.log_std_layer(net_out)
        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)
        std = torch.exp(log_std)

        # Pre-squash distribution and sample
        pi_distribution = Normal(mu, std)
        if deterministic:
            # only used for evaluating policy at test time.
            pi_action = mu
        else:
            pi_action = pi_distribution.rsample()

        if with_log_prob:
            # Appendix C
            log_pro_pi = pi_distribution.log_prob(pi_action).sum(dim=-1)
            log_pro_pi -= (2 * (np.log(2) - pi_action - F.softplus(-2*pi_action))).sum(dim=-1)
        else:
            log_pro_pi = None

        pi_action = torch.tanh(pi_action)
        pi_action = self.act_limit * pi_action
        return pi_action, log_pro_pi
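# The "Appendix C" correction above is the numerically stable form of
# log(1 - tanh(u)^2) used by squashed-Gaussian policies. A quick standalone check
# of that identity, reusing the torch/F/np names already imported in this module:
if __name__ == "__main__":
    u = torch.linspace(-5.0, 5.0, steps=11, dtype=torch.float64)
    stable = 2 * (np.log(2) - u - F.softplus(-2 * u))
    naive = torch.log(1 - torch.tanh(u) ** 2)
    assert torch.allclose(stable, naive, atol=1e-8)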
py
1a445f33681fc3d32f2aed3f2cbdbb26bb86c824
import fnmatch


class Match:
    ACCEPT = 1
    REJECT = 2
    UNKNOWN = 3


class PathFilter(object):

    class Rule(object):
        def __init__(self, pattern, match_action):
            assert match_action in (Match.ACCEPT, Match.REJECT)
            self.pattern = pattern
            self.match_action = match_action

        def match(self, path):
            if fnmatch.fnmatch(path, self.pattern):
                return self.match_action
            return Match.UNKNOWN

    def __init__(self, rules):
        self._rules = rules

    def match(self, path):
        """Tests the path against all rules in this filter"""
        for rule in self._rules:
            if rule.match(path) == Match.ACCEPT:
                return True
            elif rule.match(path) == Match.REJECT:
                return False
        return True

    @staticmethod
    def from_rule_list(rule_list):
        """Build a PathFilter from a list of rule strings such as '+ *.py' or '- *'."""
        rules = []
        for rule_string in rule_list:
            rule_string = rule_string.strip()
            rule_comps = rule_string.split()
            match_action_string = rule_comps[0]
            if match_action_string == '+':
                match_action = Match.ACCEPT
            elif match_action_string == '-':
                match_action = Match.REJECT
            else:
                raise ValueError("unknown match type: %s" % (match_action_string))
            pattern = ' '.join(rule_comps[1:])
            rules.append(PathFilter.Rule(pattern, match_action))
        return PathFilter(rules)
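# A short usage sketch of the classes above: rules are evaluated in order, the
# first matching '+' (accept) or '-' (reject) pattern wins, and unmatched paths
# are accepted by default. The paths below are invented for the example:
if __name__ == "__main__":
    path_filter = PathFilter.from_rule_list(["- *.pyc", "+ *"])
    assert path_filter.match("src/app.py")
    assert not path_filter.match("src/app.pyc")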
py
1a445f52a5c20d432fcd55697e328cf545221194
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class NRLTVIE(InfoExtractor):
    _VALID_URL = r"https?://(?:www\.)?nrl\.com/tv(/[^/]+)*/(?P<id>[^/?&#]+)"
    _TEST = {
        "url": "https://www.nrl.com/tv/news/match-highlights-titans-v-knights-862805/",
        "info_dict": {
            "id": "YyNnFuaDE6kPJqlDhG4CGQ_w89mKTau4",
            "ext": "mp4",
            "title": "Match Highlights: Titans v Knights",
        },
        "params": {
            # m3u8 download
            "skip_download": True,
            "format": "bestvideo",
        },
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        q_data = self._parse_json(
            self._html_search_regex(r'(?s)q-data="({.+?})"', webpage, "player data"),
            display_id,
        )
        ooyala_id = q_data["videoId"]
        return self.url_result(
            "ooyala:" + ooyala_id, "Ooyala", ooyala_id, q_data.get("title")
        )
py
1a445f563f31407c11cf5716b652e059becea387
# The simplest way to work with zlib requires holding all of the data to be
# compressed or decompressed in memory.
import zlib
import binascii

original_data = b'This is the original text.'
print('Original :', len(original_data), original_data)

compressed = zlib.compress(original_data)
print('Compressed :', len(compressed), binascii.hexlify(compressed))

decompressed = zlib.decompress(compressed)
print('Decompressed :', len(decompressed), decompressed)

# The example demonstrates that the compressed version of a small amount of data
# can be larger than the uncompressed version. Although the actual result depends
# on the input data, it is interesting to observe the compression overhead for
# small data sets.
"""
output:
Original : 26 b'This is the original text.'
Compressed : 32 b'789c0bc9c82c5600a2928c5485fca2ccf4ccbcc41c8592d48a123d007f2f097e'
Decompressed : 26 b'This is the original text.'
"""
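# When the data is too large to hold in memory at once, zlib's incremental API
# (compressobj/decompressobj) can be used instead; a small sketch with the same
# payload, split into two arbitrary chunks:
compressor = zlib.compressobj()
stream_compressed = b''.join([
    compressor.compress(original_data[:13]),
    compressor.compress(original_data[13:]),
    compressor.flush(),
])
print('Incremental :', len(stream_compressed),
      zlib.decompress(stream_compressed) == original_data)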
py
1a445fb5096457cc7f5cb27b6d30b25bcf85a876
""" WSGI config for project project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings") from django.core.wsgi import get_wsgi_application application = get_wsgi_application()
py
1a44608b719d7955615597e24f021d482f6073d6
# !/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals # -------------------------------------------# # author: sean lee # # email: [email protected] # #--------------------------------------------# """MIT License Copyright (c) 2018 Sean Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" import sys if sys.version_info[0] == 2: reload(sys) sys.setdefaultencoding('utf8') range = xrange import cPickle as pickle else: import pickle import io from ..module import Module from ..utils import native_content class Radical(Module): __notsave__ = [] __onlysave__ = ['dictionary'] def __init__(self): self.dictionary = {} def train(self, fpath): for fname in self.filelist(fpath): with io.open(fname, 'r', encoding="utf-8") as f: for line in f: line = line.strip() arr = line.split(',') if len(arr) != 2: continue self.dictionary[arr[0]] = arr[1] def radical(self, char): if char in self.dictionary: return self.dictionary[char] return None
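# The radical dictionary above is built from comma-separated "character,radical"
# lines. A rough usage sketch (the sample mappings are illustrative, and it
# assumes the inherited Module.filelist() accepts the directory passed to train()):
if __name__ == "__main__":
    import os
    import tempfile

    tmpdir = tempfile.mkdtemp()
    with io.open(os.path.join(tmpdir, 'radical.csv'), 'w', encoding='utf-8') as f:
        f.write(u'好,女\n妈,女\n')
    r = Radical()
    r.train(tmpdir)
    print(r.radical(u'好'))  # expected: 女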
py
1a4462a8fce96d5ff4656ecb81f5cf2f06748b9a
#!/usr/bin/env python import matplotlib.pyplot as plt import theanets from utils import load_mnist, plot_layers, plot_images e = theanets.Experiment( theanets.Classifier, layers=(784, 1024, 256, 64, 10), train_batches=100, ) # first, run an unsupervised layerwise pretrainer. train, valid, _ = load_mnist() e.train(train, valid, optimize='pretrain', patience=1, min_improvement=0.1) # second, run a supervised trainer on the classifier model. train, valid, _ = load_mnist(labels=True) e.train(train, valid) plot_layers([e.network.find(i, 0) for i in (1, 2, 3)], tied_weights=True) plt.tight_layout() plt.show()
py
1a4463f66f39b5235466f4f77ee94220e21dbcae
import logging from astropy.table import Table from astropy.coordinates import SkyCoord from astropy import units as u from astropy.io import fits import numpy as np import math import matplotlib.pyplot as plt from LCOWCSLookupProvider import getWCSForcamera, transformList from gaiaastrometryservicetools import astrometryServiceRefineWCSFromCatalog from SourceCatalogProvider import e91SourceCatalogProvider, SEPSourceCatalogProvider __author__ = '[email protected]' log = logging.getLogger(__name__) class CatalogMatcher: ''' Class to match two input catalogs: sourcecatalog is a catalog of sources extracted from an image, in coordinates of pixels (x,y) referencecatalog is a catalog of on-sky objects based on existing surveys, in coordinates of (RA, Dec) WCS is a astropy world coordiante system. the source catalog shall be a astropy Table with the columns 'x', 'y' the reference catalog shall be a astropy Table with the columns 'RA', 'Dec' ''' @staticmethod def createMatchedCatalogForLCO(imagepath, referenceCatalogProvider, matchradius=5, minobjects=1e20, undistort=False): ''' Automatically load source catalog from an LCO e91 processed file, fetch a reference catalog, and return a matchedcatalog object.''' if ('e91.fits' in imagepath): sourceCatalogProvider = e91SourceCatalogProvider() else: sourceCatalogProvider = SEPSourceCatalogProvider() sourceCatalog, image_wcs = sourceCatalogProvider.get_source_catalog(imagepath) if (sourceCatalog is None) or (image_wcs is None): return None if len(sourceCatalog['x']) < minobjects: log.info("Not enough stars found in source catalog (%d). %d are required. Skipping this one." % ( len(sourceCatalog['x']), minobjects)) return None ra = image_wcs.wcs.crval[0] dec = image_wcs.wcs.crval[1] # TODO: get camera identifier, date obs, etc exptime = None filter = None camera = None dateobs = None azimuth = None altitude = None hdu = fits.open(imagepath) # TODO: We are opening and closing fits files quite a lot here, might be not most efficient. # Go searching for meta data, in multiple extension ssince we might have a .fz compressed file :-( for extension in [0, 1]: if 'EXPTIME' in hdu[extension].header: exptime = hdu[extension].header['EXPTIME'] if ('FILTER') in hdu[extension].header: filter = hdu[extension].header['FILTER'] if 'DATE-OBS' in hdu[extension].header: dateobs = hdu[extension].header['DATE-OBS'] if 'INSTRUME' in hdu[extension].header: camera = hdu[extension].header['INSTRUME'] if 'AZIMUTH' in hdu[extension].header: azimuth = hdu[extension].header['AZIMUTH'] if 'ALTITUDE' in hdu[extension].header: altitude = hdu[extension].header['ALTITUDE'] hdu.close() # remove the distortion from the input catalog if requested and refine the WCS. if undistort: sip = getWCSForcamera(camera, image_wcs.wcs.crpix[0], image_wcs.wcs.crpix[1]) if sip is not None: log.info("undistorting image") u, v = transformList(sourceCatalog['x'], sourceCatalog['y'], sip) sourceCatalog['x'] = u sourceCatalog['y'] = v dedistortedwcs = astrometryServiceRefineWCSFromCatalog(sourceCatalog, image_wcs) if dedistortedwcs is not None: image_wcs = dedistortedwcs else: log.warning("astrometry.net did not find a solution on the undistorted image. 
Using original wcs") # fetch a reference catalog: referenceCatalog = referenceCatalogProvider.get_reference_catalog(ra, dec, 0.25) matchedCatalog = CatalogMatcher() matchedCatalog.matchCatalogs(sourceCatalog, referenceCatalog, image_wcs, matchradius) matchedCatalog.exptime = exptime matchedCatalog.filter = filter matchedCatalog.dateobs = dateobs matchedCatalog.camera = camera matchedCatalog.altitude = altitude matchedCatalog.azimuth = azimuth matchedCatalog.azimuth = azimuth return matchedCatalog def matchCatalogs(self, source=None, reference=None, wcs=None, matchradius=5): ''' match input catalogs. If no new catalogs are given, the match will be done on the chached catalogs of the class. ''' self.matchedCatalog = None # Cache management if wcs is not None: self.wcs = wcs if source is not None: self.source = source if reference is not None: self.reference = reference # transform source catalog to RADEC try: sourcera, sourcedec = self.wcs.all_pix2world(self.source['x'], self.source['y'], 1) sourceSkyCoords = SkyCoord(ra=sourcera * u.degree, dec=sourcedec * u.degree) referenceSkyCoords = SkyCoord(ra=self.reference['RA'] * u.degree, dec=self.reference['Dec'] * u.degree) idx, d2d, d3d = referenceSkyCoords.match_to_catalog_sky(sourceSkyCoords) distance = referenceSkyCoords.separation(sourceSkyCoords[idx]).arcsecond matchcondition = (distance < matchradius) self.matchedCatalog = Table([self.source['x'][idx][matchcondition], self.source['y'][idx][matchcondition], self.reference['RA'][matchcondition], self.reference['Dec'][matchcondition], distance[matchcondition] ], names=['x', 'y', 'RA', 'Dec', 'distarcsec'] ) except: log.exception("Error while transforming and matching") nummatched = len(self.matchedCatalog) if self.matchedCatalog is not None else 0 log.info("MatchCatalogs found {: 10d} pairs at search radius {: 6.3f}".format(nummatched, matchradius)) return self.matchedCatalog def updateWCSandUpdateRMS(self, usewcs=None): ''' transform the pixel list with a new wcs and get the distance based merrit function of that sollution. Note that when this is called, there should be already a matched catalog avaiable. ''' if usewcs is not None: self.wcs = usewcs # log.debug ("WCS updated for MatchedCatalog") else: pass # log.info ("WCS not updated") sourcera, sourcedec = self.wcs.all_pix2world(self.matchedCatalog['x'], self.matchedCatalog['y'], 1) sourceSkyCoords = SkyCoord(ra=sourcera * u.degree, dec=sourcedec * u.degree) referenceSkyCoords = SkyCoord(ra=self.matchedCatalog['RA'] * u.degree, dec=self.matchedCatalog['Dec'] * u.degree) self.matchedCatalog['distarcsec'] = referenceSkyCoords.separation(sourceSkyCoords).arcsecond result = math.sqrt(np.sum(self.matchedCatalog['distarcsec'] ** 2) / len(self.matchedCatalog['distarcsec'])) # log.info ("WCS CRVAL % 12.9f % 12.9f , Source RA / Dec [0] %f %f Merrit %f" % (self.wcs.wcs.crval[0], self.wcs.wcs.crval[1], sourcera[0], sourcedec[0], result)) return result def diagnosticPlots(self, basename): ''' Generate some helpful diagnostics for the distortion. 
''' if not self.matchedCatalog: return sourcera, sourcedec = self.wcs.all_pix2world(self.matchedCatalog['x'], self.matchedCatalog['y'], 1) deccor = math.cos(self.wcs.wcs.crval[1] * math.pi / 180) plt.subplot(projection=self.wcs) plt.plot(sourcera, sourcedec, '.') plt.plot(self.matchedCatalog['RA'], self.matchedCatalog['Dec'], '.') plt.xlabel("RA") plt.ylabel("DEC") plt.title(basename) plt.savefig("%s_RADEC.png" % basename) plt.close() plt.clf() plt.subplot(4, 1, 1) plt.title(basename) plt.plot(self.matchedCatalog['x'] - self.wcs.wcs.crpix[0], (self.matchedCatalog['RA'] - sourcera) * 3600. / deccor, '.') plt.xlabel("X [pixels]") plt.ylabel("residual RA [\'\']") plt.ylim([-1.75, 1.75]) plt.subplot(4, 1, 2) plt.plot(self.matchedCatalog['x'] - self.wcs.wcs.crpix[0], (self.matchedCatalog['Dec'] - sourcedec) * 3600., '.') plt.xlabel("X [pixels]") plt.ylabel("resiudal Dec [\'\']") plt.ylim([-1.75, 1.75]) plt.subplot(4, 1, 3) plt.plot(self.matchedCatalog['y'] - self.wcs.wcs.crpix[1], (self.matchedCatalog['RA'] - sourcera) * 3600. / deccor, '.') plt.xlabel("Y [pixels]") plt.ylabel("residual ra [\'\']") plt.ylim([-1.75, 1.75]) plt.subplot(4, 1, 4) plt.plot(self.matchedCatalog['y'] - self.wcs.wcs.crpix[1], (self.matchedCatalog['Dec'] - sourcedec) * 3600., '.') plt.xlabel("Y [pixels]") plt.ylabel("residual dec [\'\']") plt.ylim([-1.75, 1.75]) plt.savefig("%s_residuals.png" % basename, dpi=200) plt.close() # plt.clf() # plt.plot(np.sqrt((self.matchedCatalog['y'] - self.wcs.wcs.crpix[1]) ** 2 + ( # self.matchedCatalog['x'] - self.wcs.wcs.crpix[0]) ** 2), # self.matchedCatalog['distarcsec'], '.') # plt.xlabel("radius [pixels]") # plt.ylabel("Distance [\'\']") # plt.savefig("%s_radialdist.png" % basename)
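
# A minimal standalone sketch of the cross-match step used in matchCatalogs() above:
# every reference object is paired with its nearest source object on the sky and kept
# if the separation is below the match radius. The coordinates here are made-up values,
# not data from any real catalog or image.
def _example_sky_match(matchradius=5):
    source = SkyCoord(ra=[10.0010, 10.5000] * u.degree, dec=[-5.0000, -5.2500] * u.degree)
    reference = SkyCoord(ra=[10.0000, 10.4995] * u.degree, dec=[-5.0002, -5.2503] * u.degree)
    # For each reference object, find the nearest source object on the sky.
    idx, d2d, _ = reference.match_to_catalog_sky(source)
    keep = d2d.arcsecond < matchradius
    return idx[keep], d2d.arcsecond[keep]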
py
1a446411f6b84d3d6ffb31e7864845cc2c343b65
#!/usr/bin/env python3 # Copyright (c) 2017-2019 The BitPal Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listsinceblock RPC.""" from test_framework.test_framework import BitPalTestFramework from test_framework.messages import BIP125_SEQUENCE_NUMBER from test_framework.util import ( assert_array_result, assert_equal, assert_raises_rpc_error, connect_nodes, ) from decimal import Decimal class ListSinceBlockTest(BitPalTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have # only one connection. (See fPreferredDownload in net_processing) connect_nodes(self.nodes[1], 2) self.nodes[2].generate(101) self.sync_all() self.test_no_blockhash() self.test_invalid_blockhash() self.test_reorg() self.test_double_spend() self.test_double_send() self.double_spends_filtered() def test_no_blockhash(self): self.log.info("Test no blockhash") txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) blockhash, = self.nodes[2].generate(1) blockheight = self.nodes[2].getblockheader(blockhash)['height'] self.sync_all() txs = self.nodes[0].listtransactions() assert_array_result(txs, {"txid": txid}, { "category": "receive", "amount": 1, "blockhash": blockhash, "blockheight": blockheight, "confirmations": 1, }) assert_equal( self.nodes[0].listsinceblock(), {"lastblock": blockhash, "removed": [], "transactions": txs}) assert_equal( self.nodes[0].listsinceblock(""), {"lastblock": blockhash, "removed": [], "transactions": txs}) def test_invalid_blockhash(self): self.log.info("Test invalid blockhash") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "0000000000000000000000000000000000000000000000000000000000000000") assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 11, for 'invalid-hex')", self.nodes[0].listsinceblock, "invalid-hex") assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock, "Z000000000000000000000000000000000000000000000000000000000000000") def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was no longer in the main chain: ab0 / \ aa1 [tx0] bb1 | | aa2 bb2 | | aa3 bb3 | bb4 Consider a client that has only seen block `aa3` above. It asks the node to `listsinceblock aa3`. But at some point prior the main chain switched to the bb chain. Previously: listsinceblock would find height=4 for block aa3 and compare this to height=5 for the tip of the chain (bb4). It would then return results restricted to bb3-bb4. Now: listsinceblock finds the fork at ab0 and returns results in the range bb1-bb4. This test only checks that [tx0] is present. 
''' self.log.info("Test reorg") # Split network into two self.split_network() # send to nodes[0] from nodes[2] senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) # generate on both sides nodes1_last_blockhash = self.nodes[1].generate(6)[-1] nodes2_first_blockhash = self.nodes[2].generate(7)[0] self.log.debug("nodes[1] last blockhash = {}".format(nodes1_last_blockhash)) self.log.debug("nodes[2] first blockhash = {}".format(nodes2_first_blockhash)) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) self.join_network() # listsinceblock(nodes1_last_blockhash) should now include tx as seen from nodes[0] # and return the block height which listsinceblock now exposes since a5e7795. transactions = self.nodes[0].listsinceblock(nodes1_last_blockhash)['transactions'] found = next(tx for tx in transactions if tx['txid'] == senttx) assert_equal(found['blockheight'], self.nodes[0].getblockheader(nodes2_first_blockhash)['height']) def test_double_spend(self): ''' This tests the case where the same UTXO is spent twice on two separate blocks as part of a reorg. ab0 / \ aa1 [tx1] bb1 [tx2] | | aa2 bb2 | | aa3 bb3 | bb4 Problematic case: 1. User 1 receives BCC in tx1 from utxo1 in block aa1. 2. User 2 receives BCC in tx2 from utxo1 (same) in block bb1 3. User 1 sees 2 confirmations at block aa3. 4. Reorg into bb chain. 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now invalidated. Currently the solution to this is to detect that a reorg'd block is asked for in listsinceblock, and to iterate back over existing blocks up until the fork point, and to include all transactions that relate to the node wallet. ''' self.log.info("Test double spend") self.sync_all() # Split network into two self.split_network() # share utxo between nodes[1] and nodes[2] utxos = self.nodes[2].listunspent() utxo = utxos[0] privkey = self.nodes[2].dumpprivkey(utxo['address']) self.nodes[1].importprivkey(privkey) # send from nodes[1] using utxo to nodes[0] change = '%.8f' % (float(utxo['amount']) - 1.0003) recipient_dict = { self.nodes[0].getnewaddress(): 1, self.nodes[1].getnewaddress(): change, } utxo_dicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] txid1 = self.nodes[1].sendrawtransaction( self.nodes[1].signrawtransactionwithwallet( self.nodes[1].createrawtransaction(utxo_dicts, recipient_dict))['hex']) # send from nodes[2] using utxo to nodes[3] recipient_dict2 = { self.nodes[3].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } self.nodes[2].sendrawtransaction( self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict2))['hex']) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(4) self.join_network() self.sync_all() # gettransaction should work for txid1 assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1" # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # but it should not include 'removed' if include_removed=false lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False) assert 'removed' not in lsbres2 def test_double_send(self): ''' This tests the case where the same transaction is submitted twice on two separate blocks as part of a reorg. The former will vanish and the latter will appear as the true transaction (with confirmations dropping as a result). 
ab0 / \ aa1 [tx1] bb1 | | aa2 bb2 | | aa3 bb3 [tx1] | bb4 Asserted: 1. tx1 is listed in listsinceblock. 2. It is included in 'removed' as it was removed, even though it is now present in a different block. 3. It is listed with a confirmation count of 2 (bb3, bb4), not 3 (aa1, aa2, aa3). ''' self.log.info("Test double send") self.sync_all() # Split network into two self.split_network() # create and sign a transaction utxos = self.nodes[2].listunspent() utxo = utxos[0] change = '%.8f' % (float(utxo['amount']) - 1.0003) recipient_dict = { self.nodes[0].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } utxo_dicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] signedtxres = self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxo_dicts, recipient_dict)) assert signedtxres['complete'] signedtx = signedtxres['hex'] # send from nodes[1]; this will end up in aa1 txid1 = self.nodes[1].sendrawtransaction(signedtx) # generate bb1-bb2 on right side self.nodes[2].generate(2) # send from nodes[2]; this will end up in bb3 txid2 = self.nodes[2].sendrawtransaction(signedtx) assert_equal(txid1, txid2) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(2) self.join_network() self.sync_all() # gettransaction should work for txid1 tx1 = self.nodes[0].gettransaction(txid1) assert_equal(tx1['blockheight'], self.nodes[0].getblockheader(tx1['blockhash'])['height']) # listsinceblock(lastblockhash) should now include txid1 in transactions # as well as in removed lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # find transaction and ensure confirmations is valid for tx in lsbres['transactions']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) # the same check for the removed array; confirmations should STILL be 2 for tx in lsbres['removed']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) def double_spends_filtered(self): ''' `listsinceblock` was returning conflicted transactions even if they occurred before the specified cutoff blockhash ''' self.log.info("Test spends filtered") spending_node = self.nodes[2] dest_address = spending_node.getnewaddress() tx_input = dict( sequence=BIP125_SEQUENCE_NUMBER, **next(u for u in spending_node.listunspent())) rawtx = spending_node.createrawtransaction( [tx_input], {dest_address: tx_input["amount"] - Decimal("0.00051000"), spending_node.getrawchangeaddress(): Decimal("0.00050000")}) signedtx = spending_node.signrawtransactionwithwallet(rawtx) orig_tx_id = spending_node.sendrawtransaction(signedtx["hex"]) original_tx = spending_node.gettransaction(orig_tx_id) double_tx = spending_node.bumpfee(orig_tx_id) # check that both transactions exist block_hash = spending_node.listsinceblock( spending_node.getblockhash(spending_node.getblockcount())) original_found = False double_found = False for tx in block_hash['transactions']: if tx['txid'] == original_tx['txid']: original_found = True if tx['txid'] == double_tx['txid']: double_found = True assert_equal(original_found, True) assert_equal(double_found, True) lastblockhash = spending_node.generate(1)[0] # check that neither transaction exists block_hash = spending_node.listsinceblock(lastblockhash) original_found = False double_found = False for tx in block_hash['transactions']: if tx['txid'] == original_tx['txid']: original_found = True if tx['txid'] == double_tx['txid']: double_found = True 
assert_equal(original_found, False) assert_equal(double_found, False) if __name__ == '__main__': ListSinceBlockTest().main()
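
# A minimal sketch of how a wallet client would consume listsinceblock across reorgs, using
# only the result fields exercised by the tests above ('transactions', 'removed', 'lastblock').
# `rpc` stands for a hypothetical connected RPC proxy and is not part of this test framework.
def sync_since(rpc, last_seen_blockhash):
    result = rpc.listsinceblock(last_seen_blockhash)
    for tx in result['transactions']:
        pass  # apply/confirm these transactions locally
    for tx in result.get('removed', []):
        pass  # undo transactions that were reorged out of the best chain
    return result['lastblock']  # persist and pass back in on the next call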
py
1a4464387108a20c892852d21b294ba59787c60f
import vigra
import numpy
import opengm
from seglib import cgp2d
from seglib.clustering.ce_multicut import *

img = "img/37073.jpg"
img = "img/42049.jpg"
binCount=15
sigma = 1.5

img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:]
lab = vigra.colors.transform_RGB2Lab(img)
labels ,nseg= vigra.analysis.slicSuperpixels(lab,10.0,25)
labels = vigra.analysis.labelImage(labels).astype(numpy.uint64)
cgp,tgrid = cgp2d.cgpFromLabels(labels)
imgBig = vigra.sampling.resize(lab,cgp.shape)
grad = numpy.squeeze(vigra.filters.gaussianGradientMagnitude(imgBig,4.5))+0.1

print "accumulate cell hist"
hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma)
hist = hist.reshape([cgp.numCells(2),-1]).astype(numpy.float32)

print hist.shape
#hist=vigra.taggedView(hist,"xc")
#hist=hist.transposeToVigraOrder()
hist=numpy.array(hist)

print "construct"
hlo = cgp2d.HighLevelObjective(cgp)

print "set features"
hlo.setRegionFeatures(hist)
py
1a4465ec431621cdfc061249446635b77dfbd39b
# -*- coding: utf-8 -*- """ spectrum """ # import standard libraries import os from colour.colorimetry.spectrum import MultiSpectralDistributions from colour.models.rgb.datasets import srgb # import third party libraries import numpy as np from colour import SpectralShape, XYZ_to_RGB, XYZ_to_xyY from colour.models import RGB_COLOURSPACE_BT709 from sympy import Symbol, diff from colour.utilities import tstack # import my libraries import plot_utility as pu import spectrum_calculation as scl from spectrum_calculation import VALID_WAVELENGTH_ST, VALID_WAVELENGTH_ED,\ REFRECT_100P_SD import color_space as cs import test_pattern_generator2 as tpg import transfer_functions as tf # information __author__ = 'Toru Yoshihara' __copyright__ = 'Copyright (C) 2021 - Toru Yoshihara' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Toru Yoshihara' __email__ = 'toru.ver.11 at-sign gmail.com' __all__ = [] def load_camera_spectral_sensitivity_database(): sony_ss = scl.get_sony_nex5_ss() fig, ax1 = pu.plot_1_graph( fontsize=18, figsize=(10, 6), bg_color=(0.96, 0.96, 0.96), graph_title="SONY NEX-5N", graph_title_size=None, xlabel="Wavelength [nm]", ylabel="???", axis_label_size=None, legend_size=14, xlim=[380, 730], ylim=None, xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=2, minor_xtick_num=None, minor_ytick_num=None) ax1.plot( sony_ss.wavelengths, sony_ss.values[..., 0], label="R", color=pu.RED, alpha=1.0) ax1.plot( sony_ss.wavelengths, sony_ss.values[..., 1], label="G", color=pu.GREEN, alpha=1.0) ax1.plot( sony_ss.wavelengths, sony_ss.values[..., 2], label="B", color=pu.BLUE, alpha=1.0) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="./img/sony_ssd.png") # pu.show_and_save( # fig=fig, legend_loc='upper right', save_fname=None) def plot_camera_gamut(): sony_ss = scl.get_sony_nex5_ss() sony_csd = scl.CameraSpectralDistribution(sony_ss) primaries, white = sony_csd.calc_primary_xyY_and_white_xyY() print(primaries) print(white) fig, ax1 = pu.plot_1_graph( fontsize=18, figsize=(10, 10), bg_color=(0.96, 0.96, 0.96), graph_title="SONY NEX-5N", graph_title_size=None, xlabel="x", ylabel="y", axis_label_size=None, legend_size=14, xlim=None, ylim=None, xtick=None, ytick=None, xtick_size=None, ytick_size=None, linewidth=2, minor_xtick_num=None, minor_ytick_num=None) ax1.plot(primaries[..., 0], primaries[..., 1], label="Gamut") ax1.plot(white[0], white[1], 'x', label="Gamut", ms=10, mew=3) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="./img/sony_gamut.png") def debug_least_square_method(): var_str_list = [ ['m11', 'm12', 'm13'], ['m21', 'm22', 'm23'], ['m31', 'm32', 'm33']] mtx = [[Symbol(var_str_list[i][j]) for j in range(3)] for i in range(3)] xx = Symbol('xx') yy = Symbol('yy') zz = Symbol('zz') rr = Symbol('rr') gg = Symbol('gg') bb = Symbol('bb') jr = (xx - (mtx[0][0] * rr + mtx[0][1] * gg + mtx[0][2] * bb)) ** 2 jg = (yy - (mtx[1][0] * rr + mtx[1][1] * gg + mtx[1][2] * bb)) ** 2 jb = (zz - (mtx[2][0] * rr + mtx[2][1] * gg + mtx[2][2] * bb)) ** 2 jj = jr + jg + jb m11_diff = diff(jr, mtx[0][0]) m12_diff = diff(jr, mtx[0][1]) m13_diff = diff(jr, mtx[0][2]) print(m11_diff) print(m12_diff) print(m13_diff) def debug_cct_matrix(): color_temp = 6504 light_sd = scl.calc_illuminant_d_spectrum(color_temp) color_checker_sd = scl.load_color_checker_spectrum() camera_ss = scl.get_sony_nex5_ss() cmfs = scl.get_cie_2_1931_cmf() cct_matrix = scl.calc_cct_matrix_from_color_checker(camera_ss=camera_ss) camera_rgb = 
scl.calc_tristimulus_values_from_multi_spectrum( src_sd=light_sd, ref_sd=color_checker_sd, ss=camera_ss) measure_xyz = scl.calc_xyz_from_multi_spectrum( src_sd=light_sd, ref_sd=color_checker_sd, cmfs=cmfs) print(cct_matrix) camera_xyz_using_mtx = scl.apply_matrix(src=camera_rgb, mtx=cct_matrix) true_rgb = XYZ_to_RGB( measure_xyz, cs.D65, cs.D65, RGB_COLOURSPACE_BT709.matrix_XYZ_to_RGB) estimated_rgb = XYZ_to_RGB( camera_xyz_using_mtx, cs.D65, cs.D65, RGB_COLOURSPACE_BT709.matrix_XYZ_to_RGB) true_rgb_srgb = tf.oetf(np.clip(true_rgb, 0.0, 1.0), tf.SRGB) est_rgb_srgb = tf.oetf(np.clip(estimated_rgb, 0.0, 1.0), tf.SRGB) img = tpg.plot_color_checker_image( rgb=true_rgb_srgb, rgb2=est_rgb_srgb) tpg.img_wirte_float_as_16bit_int("./img/cct_mtx.png", img) # primaries xmin = 0.0 xmax = 0.8 ymin = -0.4 ymax = 1.2 primary_rgb = np.array([ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 1, 1]]) primary_xyz = scl.apply_matrix(primary_rgb, cct_matrix) primary_xyY = XYZ_to_xyY(primary_xyz) bt709_gamut, _ = tpg.get_primaries(name=cs.BT709) bt2020_gamut, _ = tpg.get_primaries(name=cs.BT2020) dci_p3_gamut, _ = tpg.get_primaries(name=cs.P3_D65) xy_image = tpg.get_chromaticity_image( xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(8, 14), bg_color=(0.96, 0.96, 0.96), graph_title="Chromaticity Diagram?", graph_title_size=None, xlabel="x", ylabel="y", axis_label_size=None, legend_size=17, xlim=[xmin, xmax], ylim=[ymin, ymax], xtick=[0.1 * x for x in range(9)], ytick=[0.1 * x - 0.4 for x in range(17)], xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None) cmf_xy = tpg._get_cmfs_xy() ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', label=None) ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1], c=pu.RED, label="BT.709", lw=2, alpha=0.8) ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1], c=pu.YELLOW, label="BT.2020", lw=2, alpha=0.8) ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1], c=pu.BLUE, label="DCI-P3", lw=2, alpha=0.8) ax1.plot( (cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]), '-k', label=None) ax1.plot( primary_xyY[:4, 0], primary_xyY[:4, 1], color='k', label="SONY NEX-5N") ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax)) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="img/camera_chroma_test.png") def calc_camera_gamut_from_ss(): color_temp = 6504 light_sd = scl.REFRECT_100P_SD camera_ss = scl.get_sony_nex5_ss() cmfs = scl.get_cie_2_1931_cmf() cr = camera_ss.values[..., 0] cg = camera_ss.values[..., 1] cb = camera_ss.values[..., 2] rr = cmfs.values[..., 0] gg = cmfs.values[..., 1] bb = cmfs.values[..., 2] r_base = cr - cr*cg - cr*cb g_base = cg - cg*cr - cg*cb b_base = cb - cb*cr - cb*cg rx = np.sum(r_base * rr) ry = np.sum(r_base * gg) rz = np.sum(r_base * bb) gx = np.sum(g_base * rr) gy = np.sum(g_base * gg) gz = np.sum(g_base * bb) bx = np.sum(b_base * rr) by = np.sum(b_base * gg) bz = np.sum(b_base * bb) r_xyY = XYZ_to_xyY(tstack([rx, ry, rz])) g_xyY = XYZ_to_xyY(tstack([gx, gy, gz])) b_xyY = XYZ_to_xyY(tstack([bx, by, bz])) print(r_xyY) print(g_xyY) print(b_xyY) def plot_camera_capture_xy_value(): wavelengths = REFRECT_100P_SD.wavelengths cmfs = scl.get_cie_2_1931_cmf() length = len(wavelengths) spectrum_array = np.zeros((length, length)) for idx in range(length): spectrum_array[idx, idx] = 1 data = dict(zip(wavelengths, spectrum_array)) src_sd = MultiSpectralDistributions(data=data) camera_ss = scl.get_sony_nex5_ss() camera_rgb = scl.calc_tristimulus_values_from_multi_spectrum( 
src_sd=REFRECT_100P_SD, ref_sd=src_sd, ss=camera_ss) cct_matrix = scl.calc_cct_matrix_from_color_checker(camera_ss=camera_ss) camera_xyz_using_mtx = scl.apply_matrix(src=camera_rgb, mtx=cct_matrix) camera_xyY = XYZ_to_xyY(camera_xyz_using_mtx) # ok_idx = camera_xyY[..., 2] != 0 ok_idx = (wavelengths >= 400) & (wavelengths <= 720) ok_wavelength = wavelengths[ok_idx] ok_xyY = camera_xyY[ok_idx] linear_rgb_from_line_spectrum = scl.calc_linear_rgb_from_spectrum( src_sd=REFRECT_100P_SD, ref_sd=src_sd, cmfs=cmfs, color_space=RGB_COLOURSPACE_BT709) linear_rgb_from_line_spectrum = linear_rgb_from_line_spectrum[ok_idx] linear_rgb_from_line_spectrum =\ linear_rgb_from_line_spectrum / np.max(linear_rgb_from_line_spectrum, -1)[0] # primaries xmin = 0.0 xmax = 0.8 ymin = -0.4 ymax = 1.2 primary_rgb = np.array([ [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0], [1, 1, 1]]) primary_xyz = scl.apply_matrix(primary_rgb, cct_matrix) primary_xyY = XYZ_to_xyY(primary_xyz) bt709_gamut, _ = tpg.get_primaries(name=cs.BT709) bt2020_gamut, _ = tpg.get_primaries(name=cs.BT2020) dci_p3_gamut, _ = tpg.get_primaries(name=cs.P3_D65) xy_image = tpg.get_chromaticity_image( xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax) fig, ax1 = pu.plot_1_graph( fontsize=20, figsize=(8, 14), bg_color=(0.96, 0.96, 0.96), graph_title="Chromaticity Diagram?", graph_title_size=None, xlabel="x", ylabel="y", axis_label_size=None, legend_size=17, xlim=[xmin, xmax], ylim=[ymin, ymax], xtick=[0.1 * x for x in range(9)], ytick=[0.1 * x - 0.4 for x in range(17)], xtick_size=None, ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=None) cmf_xy = tpg._get_cmfs_xy() ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', label=None) ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1], c=pu.RED, label="BT.709", lw=2, alpha=0.8) ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1], c=pu.YELLOW, label="BT.2020", lw=2, alpha=0.8) ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1], c=pu.BLUE, label="DCI-P3", lw=2, alpha=0.8) ax1.plot( (cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]), '-k', label=None) ax1.plot( primary_xyY[:4, 0], primary_xyY[:4, 1], color='k', label="SONY NEX-5N") ax1.scatter( ok_xyY[..., 0], ok_xyY[..., 1], label="monochromatic light", edgecolors=None, c=(0.4, 0.4, 0.4) ) ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax)) pu.show_and_save( fig=fig, legend_loc='upper right', save_fname="img/camera_chroma_with_line_spectrum.png") if __name__ == '__main__': os.chdir(os.path.dirname(os.path.abspath(__file__))) # load_camera_spectral_sensitivity_database() # plot_camera_gamut() # debug_least_square_method() # debug_cct_matrix() # calc_camera_gamut_from_ss() plot_camera_capture_xy_value()
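
# A minimal numpy sketch of the least-squares fit explored symbolically in
# debug_least_square_method() above: find the 3x3 matrix M that maps camera RGB to XYZ by
# minimizing sum_i ||xyz_i - M @ rgb_i||^2. Random stand-in data is used here purely for
# illustration; this is not the color-checker pipeline implemented above.
def _fit_3x3_matrix_sketch():
    rng = np.random.default_rng(0)
    rgb = rng.random((24, 3))       # e.g. 24 patches of camera RGB values
    true_mtx = rng.random((3, 3))
    xyz = rgb @ true_mtx.T          # synthetic "measured" XYZ for the sketch
    # Solve rgb @ X ≈ xyz in the least-squares sense; X is M transposed.
    mtx_t, residuals, rank, sv = np.linalg.lstsq(rgb, xyz, rcond=None)
    return mtx_t.T                  # recovered matrix, approximately equal to true_mtx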
py
1a4468154df210e8087949d0a2adf9ab9c3d8652
from bisect import bisect_left from bisect import bisect_right from contextlib import contextmanager from copy import deepcopy from functools import wraps from inspect import isclass import calendar import collections import datetime import decimal import hashlib import itertools import logging import operator import re import socket import struct import sys import threading import time import uuid import warnings try: from collections.abc import Mapping except ImportError: from collections import Mapping try: from pysqlite3 import dbapi2 as pysq3 except ImportError: try: from pysqlite2 import dbapi2 as pysq3 except ImportError: pysq3 = None try: import sqlite3 except ImportError: sqlite3 = pysq3 else: if pysq3 and pysq3.sqlite_version_info >= sqlite3.sqlite_version_info: sqlite3 = pysq3 try: from psycopg2cffi import compat compat.register() except ImportError: pass try: import psycopg2 from psycopg2 import extensions as pg_extensions try: from psycopg2 import errors as pg_errors except ImportError: pg_errors = None except ImportError: psycopg2 = pg_errors = None try: from psycopg2.extras import register_uuid as pg_register_uuid pg_register_uuid() except Exception: pass mysql_passwd = False try: import pymysql as mysql except ImportError: try: import MySQLdb as mysql mysql_passwd = True except ImportError: mysql = None __version__ = '3.14.4' __all__ = [ 'AsIs', 'AutoField', 'BareField', 'BigAutoField', 'BigBitField', 'BigIntegerField', 'BinaryUUIDField', 'BitField', 'BlobField', 'BooleanField', 'Case', 'Cast', 'CharField', 'Check', 'chunked', 'Column', 'CompositeKey', 'Context', 'Database', 'DatabaseError', 'DatabaseProxy', 'DataError', 'DateField', 'DateTimeField', 'DecimalField', 'DeferredForeignKey', 'DeferredThroughModel', 'DJANGO_MAP', 'DoesNotExist', 'DoubleField', 'DQ', 'EXCLUDED', 'Field', 'FixedCharField', 'FloatField', 'fn', 'ForeignKeyField', 'IdentityField', 'ImproperlyConfigured', 'Index', 'IntegerField', 'IntegrityError', 'InterfaceError', 'InternalError', 'IPField', 'JOIN', 'ManyToManyField', 'Model', 'ModelIndex', 'MySQLDatabase', 'NotSupportedError', 'OP', 'OperationalError', 'PostgresqlDatabase', 'PrimaryKeyField', # XXX: Deprecated, change to AutoField. 
'prefetch', 'ProgrammingError', 'Proxy', 'QualifiedNames', 'SchemaManager', 'SmallIntegerField', 'Select', 'SQL', 'SqliteDatabase', 'Table', 'TextField', 'TimeField', 'TimestampField', 'Tuple', 'UUIDField', 'Value', 'ValuesList', 'Window', ] try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass logger = logging.getLogger('peewee') logger.addHandler(NullHandler()) if sys.version_info[0] == 2: text_type = unicode bytes_type = str buffer_type = buffer izip_longest = itertools.izip_longest callable_ = callable multi_types = (list, tuple, frozenset, set) exec('def reraise(tp, value, tb=None): raise tp, value, tb') def print_(s): sys.stdout.write(s) sys.stdout.write('\n') else: import builtins try: from collections.abc import Callable except ImportError: from collections import Callable from functools import reduce callable_ = lambda c: isinstance(c, Callable) text_type = str bytes_type = bytes buffer_type = memoryview basestring = str long = int multi_types = (list, tuple, frozenset, set, range) print_ = getattr(builtins, 'print') izip_longest = itertools.zip_longest def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value if sqlite3: sqlite3.register_adapter(decimal.Decimal, str) sqlite3.register_adapter(datetime.date, str) sqlite3.register_adapter(datetime.time, str) __sqlite_version__ = sqlite3.sqlite_version_info else: __sqlite_version__ = (0, 0, 0) __date_parts__ = set(('year', 'month', 'day', 'hour', 'minute', 'second')) # Sqlite does not support the `date_part` SQL function, so we will define an # implementation in python. __sqlite_datetime_formats__ = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d', '%H:%M:%S', '%H:%M:%S.%f', '%H:%M') __sqlite_date_trunc__ = { 'year': '%Y-01-01 00:00:00', 'month': '%Y-%m-01 00:00:00', 'day': '%Y-%m-%d 00:00:00', 'hour': '%Y-%m-%d %H:00:00', 'minute': '%Y-%m-%d %H:%M:00', 'second': '%Y-%m-%d %H:%M:%S'} __mysql_date_trunc__ = __sqlite_date_trunc__.copy() __mysql_date_trunc__['minute'] = '%Y-%m-%d %H:%i:00' __mysql_date_trunc__['second'] = '%Y-%m-%d %H:%i:%S' def _sqlite_date_part(lookup_type, datetime_string): assert lookup_type in __date_parts__ if not datetime_string: return dt = format_date_time(datetime_string, __sqlite_datetime_formats__) return getattr(dt, lookup_type) def _sqlite_date_trunc(lookup_type, datetime_string): assert lookup_type in __sqlite_date_trunc__ if not datetime_string: return dt = format_date_time(datetime_string, __sqlite_datetime_formats__) return dt.strftime(__sqlite_date_trunc__[lookup_type]) def __deprecated__(s): warnings.warn(s, DeprecationWarning) class attrdict(dict): def __getattr__(self, attr): try: return self[attr] except KeyError: raise AttributeError(attr) def __setattr__(self, attr, value): self[attr] = value def __iadd__(self, rhs): self.update(rhs); return self def __add__(self, rhs): d = attrdict(self); d.update(rhs); return d SENTINEL = object() #: Operations for use in SQL expressions. OP = attrdict( AND='AND', OR='OR', ADD='+', SUB='-', MUL='*', DIV='/', BIN_AND='&', BIN_OR='|', XOR='#', MOD='%', EQ='=', LT='<', LTE='<=', GT='>', GTE='>=', NE='!=', IN='IN', NOT_IN='NOT IN', IS='IS', IS_NOT='IS NOT', LIKE='LIKE', ILIKE='ILIKE', BETWEEN='BETWEEN', REGEXP='REGEXP', IREGEXP='IREGEXP', CONCAT='||', BITWISE_NEGATION='~') # To support "django-style" double-underscore filters, create a mapping between # operation name and operation code, e.g. "__eq" == OP.EQ. 
DJANGO_MAP = attrdict({ 'eq': operator.eq, 'lt': operator.lt, 'lte': operator.le, 'gt': operator.gt, 'gte': operator.ge, 'ne': operator.ne, 'in': operator.lshift, 'is': lambda l, r: Expression(l, OP.IS, r), 'like': lambda l, r: Expression(l, OP.LIKE, r), 'ilike': lambda l, r: Expression(l, OP.ILIKE, r), 'regexp': lambda l, r: Expression(l, OP.REGEXP, r), }) #: Mapping of field type to the data-type supported by the database. Databases #: may override or add to this list. FIELD = attrdict( AUTO='INTEGER', BIGAUTO='BIGINT', BIGINT='BIGINT', BLOB='BLOB', BOOL='SMALLINT', CHAR='CHAR', DATE='DATE', DATETIME='DATETIME', DECIMAL='DECIMAL', DEFAULT='', DOUBLE='REAL', FLOAT='REAL', INT='INTEGER', SMALLINT='SMALLINT', TEXT='TEXT', TIME='TIME', UUID='TEXT', UUIDB='BLOB', VARCHAR='VARCHAR') #: Join helpers (for convenience) -- all join types are supported, this object #: is just to help avoid introducing errors by using strings everywhere. JOIN = attrdict( INNER='INNER JOIN', LEFT_OUTER='LEFT OUTER JOIN', RIGHT_OUTER='RIGHT OUTER JOIN', FULL='FULL JOIN', FULL_OUTER='FULL OUTER JOIN', CROSS='CROSS JOIN', NATURAL='NATURAL JOIN', LATERAL='LATERAL', LEFT_LATERAL='LEFT JOIN LATERAL') # Row representations. ROW = attrdict( TUPLE=1, DICT=2, NAMED_TUPLE=3, CONSTRUCTOR=4, MODEL=5) SCOPE_NORMAL = 1 SCOPE_SOURCE = 2 SCOPE_VALUES = 4 SCOPE_CTE = 8 SCOPE_COLUMN = 16 # Rules for parentheses around subqueries in compound select. CSQ_PARENTHESES_NEVER = 0 CSQ_PARENTHESES_ALWAYS = 1 CSQ_PARENTHESES_UNNESTED = 2 # Regular expressions used to convert class names to snake-case table names. # First regex handles acronym followed by word or initial lower-word followed # by a capitalized word. e.g. APIResponse -> API_Response / fooBar -> foo_Bar. # Second regex handles the normal case of two title-cased words. SNAKE_CASE_STEP1 = re.compile('(.)_*([A-Z][a-z]+)') SNAKE_CASE_STEP2 = re.compile('([a-z0-9])_*([A-Z])') # Helper functions that are used in various parts of the codebase. MODEL_BASE = '_metaclass_helper_' def with_metaclass(meta, base=object): return meta(MODEL_BASE, (base,), {}) def merge_dict(source, overrides): merged = source.copy() if overrides: merged.update(overrides) return merged def quote(path, quote_chars): if len(path) == 1: return path[0].join(quote_chars) return '.'.join([part.join(quote_chars) for part in path]) is_model = lambda o: isclass(o) and issubclass(o, Model) def ensure_tuple(value): if value is not None: return value if isinstance(value, (list, tuple)) else (value,) def ensure_entity(value): if value is not None: return value if isinstance(value, Node) else Entity(value) def make_snake_case(s): first = SNAKE_CASE_STEP1.sub(r'\1_\2', s) return SNAKE_CASE_STEP2.sub(r'\1_\2', first).lower() def chunked(it, n): marker = object() for group in (list(g) for g in izip_longest(*[iter(it)] * n, fillvalue=marker)): if group[-1] is marker: del group[group.index(marker):] yield group class _callable_context_manager(object): def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with self: return fn(*args, **kwargs) return inner class Proxy(object): """ Create a proxy or placeholder for another object. 
""" __slots__ = ('obj', '_callbacks') def __init__(self): self._callbacks = [] self.initialize(None) def initialize(self, obj): self.obj = obj for callback in self._callbacks: callback(obj) def attach_callback(self, callback): self._callbacks.append(callback) return callback def passthrough(method): def inner(self, *args, **kwargs): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, method)(*args, **kwargs) return inner # Allow proxy to be used as a context-manager. __enter__ = passthrough('__enter__') __exit__ = passthrough('__exit__') def __getattr__(self, attr): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, attr) def __setattr__(self, attr, value): if attr not in self.__slots__: raise AttributeError('Cannot set attribute on proxy.') return super(Proxy, self).__setattr__(attr, value) class DatabaseProxy(Proxy): """ Proxy implementation specifically for proxying `Database` objects. """ def connection_context(self): return ConnectionContext(self) def atomic(self, *args, **kwargs): return _atomic(self, *args, **kwargs) def manual_commit(self): return _manual(self) def transaction(self, *args, **kwargs): return _transaction(self, *args, **kwargs) def savepoint(self): return _savepoint(self) class ModelDescriptor(object): pass # SQL Generation. class AliasManager(object): __slots__ = ('_counter', '_current_index', '_mapping') def __init__(self): # A list of dictionaries containing mappings at various depths. self._counter = 0 self._current_index = 0 self._mapping = [] self.push() @property def mapping(self): return self._mapping[self._current_index - 1] def add(self, source): if source not in self.mapping: self._counter += 1 self[source] = 't%d' % self._counter return self.mapping[source] def get(self, source, any_depth=False): if any_depth: for idx in reversed(range(self._current_index)): if source in self._mapping[idx]: return self._mapping[idx][source] return self.add(source) def __getitem__(self, source): return self.get(source) def __setitem__(self, source, alias): self.mapping[source] = alias def push(self): self._current_index += 1 if self._current_index > len(self._mapping): self._mapping.append({}) def pop(self): if self._current_index == 1: raise ValueError('Cannot pop() from empty alias manager.') self._current_index -= 1 class State(collections.namedtuple('_State', ('scope', 'parentheses', 'settings'))): def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, **kwargs): return super(State, cls).__new__(cls, scope, parentheses, kwargs) def __call__(self, scope=None, parentheses=None, **kwargs): # Scope and settings are "inherited" (parentheses is not, however). scope = self.scope if scope is None else scope # Try to avoid unnecessary dict copying. if kwargs and self.settings: settings = self.settings.copy() # Copy original settings dict. settings.update(kwargs) # Update copy with overrides. 
elif kwargs: settings = kwargs else: settings = self.settings return State(scope, parentheses, **settings) def __getattr__(self, attr_name): return self.settings.get(attr_name) def __scope_context__(scope): @contextmanager def inner(self, **kwargs): with self(scope=scope, **kwargs): yield self return inner class Context(object): __slots__ = ('stack', '_sql', '_values', 'alias_manager', 'state') def __init__(self, **settings): self.stack = [] self._sql = [] self._values = [] self.alias_manager = AliasManager() self.state = State(**settings) def as_new(self): return Context(**self.state.settings) def column_sort_key(self, item): return item[0].get_sort_key(self) @property def scope(self): return self.state.scope @property def parentheses(self): return self.state.parentheses @property def subquery(self): return self.state.subquery def __call__(self, **overrides): if overrides and overrides.get('scope') == self.scope: del overrides['scope'] self.stack.append(self.state) self.state = self.state(**overrides) return self scope_normal = __scope_context__(SCOPE_NORMAL) scope_source = __scope_context__(SCOPE_SOURCE) scope_values = __scope_context__(SCOPE_VALUES) scope_cte = __scope_context__(SCOPE_CTE) scope_column = __scope_context__(SCOPE_COLUMN) def __enter__(self): if self.parentheses: self.literal('(') return self def __exit__(self, exc_type, exc_val, exc_tb): if self.parentheses: self.literal(')') self.state = self.stack.pop() @contextmanager def push_alias(self): self.alias_manager.push() yield self.alias_manager.pop() def sql(self, obj): if isinstance(obj, (Node, Context)): return obj.__sql__(self) elif is_model(obj): return obj._meta.table.__sql__(self) else: return self.sql(Value(obj)) def literal(self, keyword): self._sql.append(keyword) return self def value(self, value, converter=None, add_param=True): if converter: value = converter(value) elif converter is None and self.state.converter: # Explicitly check for None so that "False" can be used to signify # that no conversion should be applied. value = self.state.converter(value) if isinstance(value, Node): with self(converter=None): return self.sql(value) elif is_model(value): # Under certain circumstances, we could end-up treating a model- # class itself as a value. This check ensures that we drop the # table alias into the query instead of trying to parameterize a # model (for instance, passing a model as a function argument). with self.scope_column(): return self.sql(value) self._values.append(value) return self.literal(self.state.param or '?') if add_param else self def __sql__(self, ctx): ctx._sql.extend(self._sql) ctx._values.extend(self._values) return ctx def parse(self, node): return self.sql(node).query() def query(self): return ''.join(self._sql), self._values def query_to_string(query): # NOTE: this function is not exported by default as it might be misused -- # and this misuse could lead to sql injection vulnerabilities. This # function is intended for debugging or logging purposes ONLY. db = getattr(query, '_database', None) if db is not None: ctx = db.get_sql_context() else: ctx = Context() sql, params = ctx.sql(query).query() if not params: return sql param = ctx.state.param or '?' if param == '?': sql = sql.replace('?', '%s') return sql % tuple(map(_query_val_transform, params)) def _query_val_transform(v): # Interpolate parameters. 
if isinstance(v, (text_type, datetime.datetime, datetime.date, datetime.time)): v = "'%s'" % v elif isinstance(v, bytes_type): try: v = v.decode('utf8') except UnicodeDecodeError: v = v.decode('raw_unicode_escape') v = "'%s'" % v elif isinstance(v, int): v = '%s' % int(v) # Also handles booleans -> 1 or 0. elif v is None: v = 'NULL' else: v = str(v) return v # AST. class Node(object): _coerce = True def clone(self): obj = self.__class__.__new__(self.__class__) obj.__dict__ = self.__dict__.copy() return obj def __sql__(self, ctx): raise NotImplementedError @staticmethod def copy(method): def inner(self, *args, **kwargs): clone = self.clone() method(clone, *args, **kwargs) return clone return inner def coerce(self, _coerce=True): if _coerce != self._coerce: clone = self.clone() clone._coerce = _coerce return clone return self def is_alias(self): return False def unwrap(self): return self class ColumnFactory(object): __slots__ = ('node',) def __init__(self, node): self.node = node def __getattr__(self, attr): return Column(self.node, attr) class _DynamicColumn(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: return ColumnFactory(instance) # Implements __getattr__(). return self class _ExplicitColumn(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: raise AttributeError( '%s specifies columns explicitly, and does not support ' 'dynamic column lookups.' % instance) return self class Source(Node): c = _DynamicColumn() def __init__(self, alias=None): super(Source, self).__init__() self._alias = alias @Node.copy def alias(self, name): self._alias = name def select(self, *columns): if not columns: columns = (SQL('*'),) return Select((self,), columns) def join(self, dest, join_type=JOIN.INNER, on=None): return Join(self, dest, join_type, on) def left_outer_join(self, dest, on=None): return Join(self, dest, JOIN.LEFT_OUTER, on) def cte(self, name, recursive=False, columns=None, materialized=None): return CTE(name, self, recursive=recursive, columns=columns, materialized=materialized) def get_sort_key(self, ctx): if self._alias: return (self._alias,) return (ctx.alias_manager[self],) def apply_alias(self, ctx): # If we are defining the source, include the "AS alias" declaration. An # alias is created for the source if one is not already defined. 
if ctx.scope == SCOPE_SOURCE: if self._alias: ctx.alias_manager[self] = self._alias ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self])) return ctx def apply_column(self, ctx): if self._alias: ctx.alias_manager[self] = self._alias return ctx.sql(Entity(ctx.alias_manager[self])) class _HashableSource(object): def __init__(self, *args, **kwargs): super(_HashableSource, self).__init__(*args, **kwargs) self._update_hash() @Node.copy def alias(self, name): self._alias = name self._update_hash() def _update_hash(self): self._hash = self._get_hash() def _get_hash(self): return hash((self.__class__, self._path, self._alias)) def __hash__(self): return self._hash def __eq__(self, other): if isinstance(other, _HashableSource): return self._hash == other._hash return Expression(self, OP.EQ, other) def __ne__(self, other): if isinstance(other, _HashableSource): return self._hash != other._hash return Expression(self, OP.NE, other) def _e(op): def inner(self, rhs): return Expression(self, op, rhs) return inner __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) def __bind_database__(meth): @wraps(meth) def inner(self, *args, **kwargs): result = meth(self, *args, **kwargs) if self._database: return result.bind(self._database) return result return inner def __join__(join_type=JOIN.INNER, inverted=False): def method(self, other): if inverted: self, other = other, self return Join(self, other, join_type=join_type) return method class BaseTable(Source): __and__ = __join__(JOIN.INNER) __add__ = __join__(JOIN.LEFT_OUTER) __sub__ = __join__(JOIN.RIGHT_OUTER) __or__ = __join__(JOIN.FULL_OUTER) __mul__ = __join__(JOIN.CROSS) __rand__ = __join__(JOIN.INNER, inverted=True) __radd__ = __join__(JOIN.LEFT_OUTER, inverted=True) __rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True) __ror__ = __join__(JOIN.FULL_OUTER, inverted=True) __rmul__ = __join__(JOIN.CROSS, inverted=True) class _BoundTableContext(_callable_context_manager): def __init__(self, table, database): self.table = table self.database = database def __enter__(self): self._orig_database = self.table._database self.table.bind(self.database) if self.table._model is not None: self.table._model.bind(self.database) return self.table def __exit__(self, exc_type, exc_val, exc_tb): self.table.bind(self._orig_database) if self.table._model is not None: self.table._model.bind(self._orig_database) class Table(_HashableSource, BaseTable): def __init__(self, name, columns=None, primary_key=None, schema=None, alias=None, _model=None, _database=None): self.__name__ = name self._columns = columns self._primary_key = primary_key self._schema = schema self._path = (schema, name) if schema else (name,) self._model = _model self._database = _database super(Table, self).__init__(alias=alias) # Allow tables to restrict what columns are available. if columns is not None: self.c = _ExplicitColumn() for column in columns: setattr(self, column, Column(self, column)) if primary_key: col_src = self if self._columns else self.c self.primary_key = getattr(col_src, primary_key) else: self.primary_key = None def clone(self): # Ensure a deep copy of the column instances. 
return Table( self.__name__, columns=self._columns, primary_key=self._primary_key, schema=self._schema, alias=self._alias, _model=self._model, _database=self._database) def bind(self, database=None): self._database = database return self def bind_ctx(self, database=None): return _BoundTableContext(self, database) def _get_hash(self): return hash((self.__class__, self._path, self._alias, self._model)) @__bind_database__ def select(self, *columns): if not columns and self._columns: columns = [Column(self, column) for column in self._columns] return Select((self,), columns) @__bind_database__ def insert(self, insert=None, columns=None, **kwargs): if kwargs: insert = {} if insert is None else insert src = self if self._columns else self.c for key, value in kwargs.items(): insert[getattr(src, key)] = value return Insert(self, insert=insert, columns=columns) @__bind_database__ def replace(self, insert=None, columns=None, **kwargs): return (self .insert(insert=insert, columns=columns) .on_conflict('REPLACE')) @__bind_database__ def update(self, update=None, **kwargs): if kwargs: update = {} if update is None else update for key, value in kwargs.items(): src = self if self._columns else self.c update[getattr(src, key)] = value return Update(self, update=update) @__bind_database__ def delete(self): return Delete(self) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: # Return the quoted table name. return ctx.sql(Entity(*self._path)) if self._alias: ctx.alias_manager[self] = self._alias if ctx.scope == SCOPE_SOURCE: # Define the table and its alias. return self.apply_alias(ctx.sql(Entity(*self._path))) else: # Refer to the table using the alias. return self.apply_column(ctx) class Join(BaseTable): def __init__(self, lhs, rhs, join_type=JOIN.INNER, on=None, alias=None): super(Join, self).__init__(alias=alias) self.lhs = lhs self.rhs = rhs self.join_type = join_type self._on = on def on(self, predicate): self._on = predicate return self def __sql__(self, ctx): (ctx .sql(self.lhs) .literal(' %s ' % self.join_type) .sql(self.rhs)) if self._on is not None: ctx.literal(' ON ').sql(self._on) return ctx class ValuesList(_HashableSource, BaseTable): def __init__(self, values, columns=None, alias=None): self._values = values self._columns = columns super(ValuesList, self).__init__(alias=alias) def _get_hash(self): return hash((self.__class__, id(self._values), self._alias)) @Node.copy def columns(self, *names): self._columns = names def __sql__(self, ctx): if self._alias: ctx.alias_manager[self] = self._alias if ctx.scope == SCOPE_SOURCE or ctx.scope == SCOPE_NORMAL: with ctx(parentheses=not ctx.parentheses): ctx = (ctx .literal('VALUES ') .sql(CommaNodeList([ EnclosedNodeList(row) for row in self._values]))) if ctx.scope == SCOPE_SOURCE: ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self])) if self._columns: entities = [Entity(c) for c in self._columns] ctx.sql(EnclosedNodeList(entities)) else: ctx.sql(Entity(ctx.alias_manager[self])) return ctx class CTE(_HashableSource, Source): def __init__(self, name, query, recursive=False, columns=None, materialized=None): self._alias = name self._query = query self._recursive = recursive self._materialized = materialized if columns is not None: columns = [Entity(c) if isinstance(c, basestring) else c for c in columns] self._columns = columns query._cte_list = () super(CTE, self).__init__(alias=name) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns ' 'from the CTE to select.') query = 
(Select((self,), columns) .with_cte(self) .bind(self._query._database)) try: query = query.objects(self._query.model) except AttributeError: pass return query def _get_hash(self): return hash((self.__class__, self._alias, id(self._query))) def union_all(self, rhs): clone = self._query.clone() return CTE(self._alias, clone + rhs, self._recursive, self._columns) __add__ = union_all def union(self, rhs): clone = self._query.clone() return CTE(self._alias, clone | rhs, self._recursive, self._columns) __or__ = union def __sql__(self, ctx): if ctx.scope != SCOPE_CTE: return ctx.sql(Entity(self._alias)) with ctx.push_alias(): ctx.alias_manager[self] = self._alias ctx.sql(Entity(self._alias)) if self._columns: ctx.literal(' ').sql(EnclosedNodeList(self._columns)) ctx.literal(' AS ') if self._materialized: ctx.literal('MATERIALIZED ') elif self._materialized is False: ctx.literal('NOT MATERIALIZED ') with ctx.scope_normal(parentheses=True): ctx.sql(self._query) return ctx class ColumnBase(Node): _converter = None @Node.copy def converter(self, converter=None): self._converter = converter def alias(self, alias): if alias: return Alias(self, alias) return self def unalias(self): return self def cast(self, as_type): return Cast(self, as_type) def asc(self, collation=None, nulls=None): return Asc(self, collation=collation, nulls=nulls) __pos__ = asc def desc(self, collation=None, nulls=None): return Desc(self, collation=collation, nulls=nulls) __neg__ = desc def __invert__(self): return Negated(self) def _e(op, inv=False): """ Lightweight factory which returns a method that builds an Expression consisting of the left-hand and right-hand operands, using `op`. """ def inner(self, rhs): if inv: return Expression(rhs, op, self) return Expression(self, op, rhs) return inner __and__ = _e(OP.AND) __or__ = _e(OP.OR) __add__ = _e(OP.ADD) __sub__ = _e(OP.SUB) __mul__ = _e(OP.MUL) __div__ = __truediv__ = _e(OP.DIV) __xor__ = _e(OP.XOR) __radd__ = _e(OP.ADD, inv=True) __rsub__ = _e(OP.SUB, inv=True) __rmul__ = _e(OP.MUL, inv=True) __rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True) __rand__ = _e(OP.AND, inv=True) __ror__ = _e(OP.OR, inv=True) __rxor__ = _e(OP.XOR, inv=True) def __eq__(self, rhs): op = OP.IS if rhs is None else OP.EQ return Expression(self, op, rhs) def __ne__(self, rhs): op = OP.IS_NOT if rhs is None else OP.NE return Expression(self, op, rhs) __lt__ = _e(OP.LT) __le__ = _e(OP.LTE) __gt__ = _e(OP.GT) __ge__ = _e(OP.GTE) __lshift__ = _e(OP.IN) __rshift__ = _e(OP.IS) __mod__ = _e(OP.LIKE) __pow__ = _e(OP.ILIKE) like = _e(OP.LIKE) ilike = _e(OP.ILIKE) bin_and = _e(OP.BIN_AND) bin_or = _e(OP.BIN_OR) in_ = _e(OP.IN) not_in = _e(OP.NOT_IN) regexp = _e(OP.REGEXP) # Special expressions. 
def is_null(self, is_null=True): op = OP.IS if is_null else OP.IS_NOT return Expression(self, op, None) def _escape_like_expr(self, s, template): if s.find('_') >= 0 or s.find('%') >= 0 or s.find('\\') >= 0: s = s.replace('\\', '\\\\').replace('_', '\\_').replace('%', '\\%') return NodeList((template % s, SQL('ESCAPE'), '\\')) return template % s def contains(self, rhs): if isinstance(rhs, Node): rhs = Expression('%', OP.CONCAT, Expression(rhs, OP.CONCAT, '%')) else: rhs = self._escape_like_expr(rhs, '%%%s%%') return Expression(self, OP.ILIKE, rhs) def startswith(self, rhs): if isinstance(rhs, Node): rhs = Expression(rhs, OP.CONCAT, '%') else: rhs = self._escape_like_expr(rhs, '%s%%') return Expression(self, OP.ILIKE, rhs) def endswith(self, rhs): if isinstance(rhs, Node): rhs = Expression('%', OP.CONCAT, rhs) else: rhs = self._escape_like_expr(rhs, '%%%s') return Expression(self, OP.ILIKE, rhs) def between(self, lo, hi): return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi))) def concat(self, rhs): return StringExpression(self, OP.CONCAT, rhs) def regexp(self, rhs): return Expression(self, OP.REGEXP, rhs) def iregexp(self, rhs): return Expression(self, OP.IREGEXP, rhs) def __getitem__(self, item): if isinstance(item, slice): if item.start is None or item.stop is None: raise ValueError('BETWEEN range must have both a start- and ' 'end-point.') return self.between(item.start, item.stop) return self == item def distinct(self): return NodeList((SQL('DISTINCT'), self)) def collate(self, collation): return NodeList((self, SQL('COLLATE %s' % collation))) def get_sort_key(self, ctx): return () class Column(ColumnBase): def __init__(self, source, name): self.source = source self.name = name def get_sort_key(self, ctx): if ctx.scope == SCOPE_VALUES: return (self.name,) else: return self.source.get_sort_key(ctx) + (self.name,) def __hash__(self): return hash((self.source, self.name)) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: return ctx.sql(Entity(self.name)) else: with ctx.scope_column(): return ctx.sql(self.source).literal('.').sql(Entity(self.name)) class WrappedNode(ColumnBase): def __init__(self, node): self.node = node self._coerce = getattr(node, '_coerce', True) self._converter = getattr(node, '_converter', None) def is_alias(self): return self.node.is_alias() def unwrap(self): return self.node.unwrap() class EntityFactory(object): __slots__ = ('node',) def __init__(self, node): self.node = node def __getattr__(self, attr): return Entity(self.node, attr) class _DynamicEntity(object): __slots__ = () def __get__(self, instance, instance_type=None): if instance is not None: return EntityFactory(instance._alias) # Implements __getattr__(). 
return self class Alias(WrappedNode): c = _DynamicEntity() def __init__(self, node, alias): super(Alias, self).__init__(node) self._alias = alias def __hash__(self): return hash(self._alias) def alias(self, alias=None): if alias is None: return self.node else: return Alias(self.node, alias) def unalias(self): return self.node def is_alias(self): return True def __sql__(self, ctx): if ctx.scope == SCOPE_SOURCE: return (ctx .sql(self.node) .literal(' AS ') .sql(Entity(self._alias))) else: return ctx.sql(Entity(self._alias)) class Negated(WrappedNode): def __invert__(self): return self.node def __sql__(self, ctx): return ctx.literal('NOT ').sql(self.node) class BitwiseMixin(object): def __and__(self, other): return self.bin_and(other) def __or__(self, other): return self.bin_or(other) def __sub__(self, other): return self.bin_and(other.bin_negated()) def __invert__(self): return BitwiseNegated(self) class BitwiseNegated(BitwiseMixin, WrappedNode): def __invert__(self): return self.node def __sql__(self, ctx): if ctx.state.operations: op_sql = ctx.state.operations.get(self.op, self.op) else: op_sql = self.op return ctx.literal(op_sql).sql(self.node) class Value(ColumnBase): def __init__(self, value, converter=None, unpack=True): self.value = value self.converter = converter self.multi = unpack and isinstance(self.value, multi_types) if self.multi: self.values = [] for item in self.value: if isinstance(item, Node): self.values.append(item) else: self.values.append(Value(item, self.converter)) def __sql__(self, ctx): if self.multi: # For multi-part values (e.g. lists of IDs). return ctx.sql(EnclosedNodeList(self.values)) return ctx.value(self.value, self.converter) def AsIs(value): return Value(value, unpack=False) class Cast(WrappedNode): def __init__(self, node, cast): super(Cast, self).__init__(node) self._cast = cast self._coerce = False def __sql__(self, ctx): return (ctx .literal('CAST(') .sql(self.node) .literal(' AS %s)' % self._cast)) class Ordering(WrappedNode): def __init__(self, node, direction, collation=None, nulls=None): super(Ordering, self).__init__(node) self.direction = direction self.collation = collation self.nulls = nulls if nulls and nulls.lower() not in ('first', 'last'): raise ValueError('Ordering nulls= parameter must be "first" or ' '"last", got: %s' % nulls) def collate(self, collation=None): return Ordering(self.node, self.direction, collation) def _null_ordering_case(self, nulls): if nulls.lower() == 'last': ifnull, notnull = 1, 0 elif nulls.lower() == 'first': ifnull, notnull = 0, 1 else: raise ValueError('unsupported value for nulls= ordering.') return Case(None, ((self.node.is_null(), ifnull),), notnull) def __sql__(self, ctx): if self.nulls and not ctx.state.nulls_ordering: ctx.sql(self._null_ordering_case(self.nulls)).literal(', ') ctx.sql(self.node).literal(' %s' % self.direction) if self.collation: ctx.literal(' COLLATE %s' % self.collation) if self.nulls and ctx.state.nulls_ordering: ctx.literal(' NULLS %s' % self.nulls) return ctx def Asc(node, collation=None, nulls=None): return Ordering(node, 'ASC', collation, nulls) def Desc(node, collation=None, nulls=None): return Ordering(node, 'DESC', collation, nulls) class Expression(ColumnBase): def __init__(self, lhs, op, rhs, flat=False): self.lhs = lhs self.op = op self.rhs = rhs self.flat = flat def __sql__(self, ctx): overrides = {'parentheses': not self.flat, 'in_expr': True} # First attempt to unwrap the node on the left-hand-side, so that we # can get at the underlying Field if one is present. 
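# Editorial note (illustrative): for an expression such as
# (some_field.alias('x') == 'value'), the Alias wrapper is unwrapped here so
# the underlying Field's db_value() converter can be applied to the right-hand
# side before it is parameterized.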
node = raw_node = self.lhs if isinstance(raw_node, WrappedNode): node = raw_node.unwrap() # Set up the appropriate converter if we have a field on the left side. if isinstance(node, Field) and raw_node._coerce: overrides['converter'] = node.db_value overrides['is_fk_expr'] = isinstance(node, ForeignKeyField) else: overrides['converter'] = None if ctx.state.operations: op_sql = ctx.state.operations.get(self.op, self.op) else: op_sql = self.op with ctx(**overrides): # Postgresql reports an error for IN/NOT IN (), so convert to # the equivalent boolean expression. op_in = self.op == OP.IN or self.op == OP.NOT_IN if op_in and ctx.as_new().parse(self.rhs)[0] == '()': return ctx.literal('0 = 1' if self.op == OP.IN else '1 = 1') return (ctx .sql(self.lhs) .literal(' %s ' % op_sql) .sql(self.rhs)) class StringExpression(Expression): def __add__(self, rhs): return self.concat(rhs) def __radd__(self, lhs): return StringExpression(lhs, OP.CONCAT, self) class Entity(ColumnBase): def __init__(self, *path): self._path = [part.replace('"', '""') for part in path if part] def __getattr__(self, attr): return Entity(*self._path + [attr]) def get_sort_key(self, ctx): return tuple(self._path) def __hash__(self): return hash((self.__class__.__name__, tuple(self._path))) def __sql__(self, ctx): return ctx.literal(quote(self._path, ctx.state.quote or '""')) class SQL(ColumnBase): def __init__(self, sql, params=None): self.sql = sql self.params = params def __sql__(self, ctx): ctx.literal(self.sql) if self.params: for param in self.params: ctx.value(param, False, add_param=False) return ctx def Check(constraint, name=None): check = SQL('CHECK (%s)' % constraint) if not name: return check return NodeList((SQL('CONSTRAINT'), Entity(name), check)) class Function(ColumnBase): def __init__(self, name, arguments, coerce=True, python_value=None): self.name = name self.arguments = arguments self._filter = None self._order_by = None self._python_value = python_value if name and name.lower() in ('sum', 'count', 'cast', 'array_agg'): self._coerce = False else: self._coerce = coerce def __getattr__(self, attr): def decorator(*args, **kwargs): return Function(attr, args, **kwargs) return decorator @Node.copy def filter(self, where=None): self._filter = where @Node.copy def order_by(self, *ordering): self._order_by = ordering @Node.copy def python_value(self, func=None): self._python_value = func def over(self, partition_by=None, order_by=None, start=None, end=None, frame_type=None, window=None, exclude=None): if isinstance(partition_by, Window) and window is None: window = partition_by if window is not None: node = WindowAlias(window) else: node = Window(partition_by=partition_by, order_by=order_by, start=start, end=end, frame_type=frame_type, exclude=exclude, _inline=True) return NodeList((self, SQL('OVER'), node)) def __sql__(self, ctx): ctx.literal(self.name) if not len(self.arguments): ctx.literal('()') else: args = self.arguments # If this is an ordered aggregate, then we will modify the last # argument to append the ORDER BY ... clause. We do this to avoid # double-wrapping any expression args in parentheses, as NodeList # has a special check (hack) in place to work around this. 
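# Editorial example (illustrative): an ordered aggregate such as
#     fn.GROUP_CONCAT(t.name).order_by(t.name)
# should render roughly as GROUP_CONCAT("t1"."name" ORDER BY "t1"."name") --
# the ORDER BY is folded into the final argument so the argument list is not
# wrapped in an extra set of parentheses.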
if self._order_by: args = list(args) args[-1] = NodeList((args[-1], SQL('ORDER BY'), CommaNodeList(self._order_by))) with ctx(in_function=True, function_arg_count=len(self.arguments)): ctx.sql(EnclosedNodeList([ (arg if isinstance(arg, Node) else Value(arg, False)) for arg in args])) if self._filter: ctx.literal(' FILTER (WHERE ').sql(self._filter).literal(')') return ctx fn = Function(None, None) class Window(Node): # Frame start/end and frame exclusion. CURRENT_ROW = SQL('CURRENT ROW') GROUP = SQL('GROUP') TIES = SQL('TIES') NO_OTHERS = SQL('NO OTHERS') # Frame types. GROUPS = 'GROUPS' RANGE = 'RANGE' ROWS = 'ROWS' def __init__(self, partition_by=None, order_by=None, start=None, end=None, frame_type=None, extends=None, exclude=None, alias=None, _inline=False): super(Window, self).__init__() if start is not None and not isinstance(start, SQL): start = SQL(start) if end is not None and not isinstance(end, SQL): end = SQL(end) self.partition_by = ensure_tuple(partition_by) self.order_by = ensure_tuple(order_by) self.start = start self.end = end if self.start is None and self.end is not None: raise ValueError('Cannot specify WINDOW end without start.') self._alias = alias or 'w' self._inline = _inline self.frame_type = frame_type self._extends = extends self._exclude = exclude def alias(self, alias=None): self._alias = alias or 'w' return self @Node.copy def as_range(self): self.frame_type = Window.RANGE @Node.copy def as_rows(self): self.frame_type = Window.ROWS @Node.copy def as_groups(self): self.frame_type = Window.GROUPS @Node.copy def extends(self, window=None): self._extends = window @Node.copy def exclude(self, frame_exclusion=None): if isinstance(frame_exclusion, basestring): frame_exclusion = SQL(frame_exclusion) self._exclude = frame_exclusion @staticmethod def following(value=None): if value is None: return SQL('UNBOUNDED FOLLOWING') return SQL('%d FOLLOWING' % value) @staticmethod def preceding(value=None): if value is None: return SQL('UNBOUNDED PRECEDING') return SQL('%d PRECEDING' % value) def __sql__(self, ctx): if ctx.scope != SCOPE_SOURCE and not self._inline: ctx.literal(self._alias) ctx.literal(' AS ') with ctx(parentheses=True): parts = [] if self._extends is not None: ext = self._extends if isinstance(ext, Window): ext = SQL(ext._alias) elif isinstance(ext, basestring): ext = SQL(ext) parts.append(ext) if self.partition_by: parts.extend(( SQL('PARTITION BY'), CommaNodeList(self.partition_by))) if self.order_by: parts.extend(( SQL('ORDER BY'), CommaNodeList(self.order_by))) if self.start is not None and self.end is not None: frame = self.frame_type or 'ROWS' parts.extend(( SQL('%s BETWEEN' % frame), self.start, SQL('AND'), self.end)) elif self.start is not None: parts.extend((SQL(self.frame_type or 'ROWS'), self.start)) elif self.frame_type is not None: parts.append(SQL('%s UNBOUNDED PRECEDING' % self.frame_type)) if self._exclude is not None: parts.extend((SQL('EXCLUDE'), self._exclude)) ctx.sql(NodeList(parts)) return ctx class WindowAlias(Node): def __init__(self, window): self.window = window def alias(self, window_alias): self.window._alias = window_alias return self def __sql__(self, ctx): return ctx.literal(self.window._alias or 'w') class ForUpdate(Node): def __init__(self, expr, of=None, nowait=None): expr = 'FOR UPDATE' if expr is True else expr if expr.lower().endswith('nowait'): expr = expr[:-7] # Strip off the "nowait" bit. 
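# Editorial note: as a result, ForUpdate('FOR UPDATE NOWAIT') behaves the same
# as ForUpdate('FOR UPDATE', nowait=True) -- the suffix is stripped here and
# re-emitted as a separate NOWAIT clause by __sql__().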
nowait = True self._expr = expr if of is not None and not isinstance(of, (list, set, tuple)): of = (of,) self._of = of self._nowait = nowait def __sql__(self, ctx): ctx.literal(self._expr) if self._of is not None: ctx.literal(' OF ').sql(CommaNodeList(self._of)) if self._nowait: ctx.literal(' NOWAIT') return ctx def Case(predicate, expression_tuples, default=None): clauses = [SQL('CASE')] if predicate is not None: clauses.append(predicate) for expr, value in expression_tuples: clauses.extend((SQL('WHEN'), expr, SQL('THEN'), value)) if default is not None: clauses.extend((SQL('ELSE'), default)) clauses.append(SQL('END')) return NodeList(clauses) class NodeList(ColumnBase): def __init__(self, nodes, glue=' ', parens=False): self.nodes = nodes self.glue = glue self.parens = parens if parens and len(self.nodes) == 1 and \ isinstance(self.nodes[0], Expression) and \ not self.nodes[0].flat: # Hack to avoid double-parentheses. self.nodes = (self.nodes[0].clone(),) self.nodes[0].flat = True def __sql__(self, ctx): n_nodes = len(self.nodes) if n_nodes == 0: return ctx.literal('()') if self.parens else ctx with ctx(parentheses=self.parens): for i in range(n_nodes - 1): ctx.sql(self.nodes[i]) ctx.literal(self.glue) ctx.sql(self.nodes[n_nodes - 1]) return ctx def CommaNodeList(nodes): return NodeList(nodes, ', ') def EnclosedNodeList(nodes): return NodeList(nodes, ', ', True) class _Namespace(Node): __slots__ = ('_name',) def __init__(self, name): self._name = name def __getattr__(self, attr): return NamespaceAttribute(self, attr) __getitem__ = __getattr__ class NamespaceAttribute(ColumnBase): def __init__(self, namespace, attribute): self._namespace = namespace self._attribute = attribute def __sql__(self, ctx): return (ctx .literal(self._namespace._name + '.') .sql(Entity(self._attribute))) EXCLUDED = _Namespace('EXCLUDED') class DQ(ColumnBase): def __init__(self, **query): super(DQ, self).__init__() self.query = query self._negated = False @Node.copy def __invert__(self): self._negated = not self._negated def clone(self): node = DQ(**self.query) node._negated = self._negated return node #: Represent a row tuple. Tuple = lambda *a: EnclosedNodeList(a) class QualifiedNames(WrappedNode): def __sql__(self, ctx): with ctx.scope_column(): return ctx.sql(self.node) def qualify_names(node): # Search a node heirarchy to ensure that any column-like objects are # referenced using fully-qualified names. 
if isinstance(node, Expression): return node.__class__(qualify_names(node.lhs), node.op, qualify_names(node.rhs), node.flat) elif isinstance(node, ColumnBase): return QualifiedNames(node) return node class OnConflict(Node): def __init__(self, action=None, update=None, preserve=None, where=None, conflict_target=None, conflict_where=None, conflict_constraint=None): self._action = action self._update = update self._preserve = ensure_tuple(preserve) self._where = where if conflict_target is not None and conflict_constraint is not None: raise ValueError('only one of "conflict_target" and ' '"conflict_constraint" may be specified.') self._conflict_target = ensure_tuple(conflict_target) self._conflict_where = conflict_where self._conflict_constraint = conflict_constraint def get_conflict_statement(self, ctx, query): return ctx.state.conflict_statement(self, query) def get_conflict_update(self, ctx, query): return ctx.state.conflict_update(self, query) @Node.copy def preserve(self, *columns): self._preserve = columns @Node.copy def update(self, _data=None, **kwargs): if _data and kwargs and not isinstance(_data, dict): raise ValueError('Cannot mix data with keyword arguments in the ' 'OnConflict update method.') _data = _data or {} if kwargs: _data.update(kwargs) self._update = _data @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def conflict_target(self, *constraints): self._conflict_constraint = None self._conflict_target = constraints @Node.copy def conflict_where(self, *expressions): if self._conflict_where is not None: expressions = (self._conflict_where,) + expressions self._conflict_where = reduce(operator.and_, expressions) @Node.copy def conflict_constraint(self, constraint): self._conflict_constraint = constraint self._conflict_target = None def database_required(method): @wraps(method) def inner(self, database=None, *args, **kwargs): database = self._database if database is None else database if not database: raise InterfaceError('Query must be bound to a database in order ' 'to call "%s".' % method.__name__) return method(self, database, *args, **kwargs) return inner # BASE QUERY INTERFACE. 
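# Editorial usage sketch (illustrative only; the names below are hypothetical
# and assume an existing "users" table).  The query classes that follow are
# normally reached through the Model layer, but they can be composed directly:
#
#     db = SqliteDatabase(':memory:')
#     users = Table('users', ('id', 'username')).bind(db)
#     q = users.select().where(users.id > 3).order_by(users.username)
#     sql, params = q.sql()       # roughly: ('SELECT ... WHERE ... ORDER BY ...', [3])
#     for row in q.dicts():       # non-model queries return dicts by default
#         print(row['username'])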
class BaseQuery(Node): default_row_type = ROW.DICT def __init__(self, _database=None, **kwargs): self._database = _database self._cursor_wrapper = None self._row_type = None self._constructor = None super(BaseQuery, self).__init__(**kwargs) def bind(self, database=None): self._database = database return self def clone(self): query = super(BaseQuery, self).clone() query._cursor_wrapper = None return query @Node.copy def dicts(self, as_dict=True): self._row_type = ROW.DICT if as_dict else None return self @Node.copy def tuples(self, as_tuple=True): self._row_type = ROW.TUPLE if as_tuple else None return self @Node.copy def namedtuples(self, as_namedtuple=True): self._row_type = ROW.NAMED_TUPLE if as_namedtuple else None return self @Node.copy def objects(self, constructor=None): self._row_type = ROW.CONSTRUCTOR if constructor else None self._constructor = constructor return self def _get_cursor_wrapper(self, cursor): row_type = self._row_type or self.default_row_type if row_type == ROW.DICT: return DictCursorWrapper(cursor) elif row_type == ROW.TUPLE: return CursorWrapper(cursor) elif row_type == ROW.NAMED_TUPLE: return NamedTupleCursorWrapper(cursor) elif row_type == ROW.CONSTRUCTOR: return ObjectCursorWrapper(cursor, self._constructor) else: raise ValueError('Unrecognized row type: "%s".' % row_type) def __sql__(self, ctx): raise NotImplementedError def sql(self): if self._database: context = self._database.get_sql_context() else: context = Context() return context.parse(self) @database_required def execute(self, database): return self._execute(database) def _execute(self, database): raise NotImplementedError def iterator(self, database=None): return iter(self.execute(database).iterator()) def _ensure_execution(self): if not self._cursor_wrapper: if not self._database: raise ValueError('Query has not been executed.') self.execute() def __iter__(self): self._ensure_execution() return iter(self._cursor_wrapper) def __getitem__(self, value): self._ensure_execution() if isinstance(value, slice): index = value.stop else: index = value if index is not None: index = index + 1 if index >= 0 else 0 self._cursor_wrapper.fill_cache(index) return self._cursor_wrapper.row_cache[value] def __len__(self): self._ensure_execution() return len(self._cursor_wrapper) def __str__(self): return query_to_string(self) class RawQuery(BaseQuery): def __init__(self, sql=None, params=None, **kwargs): super(RawQuery, self).__init__(**kwargs) self._sql = sql self._params = params def __sql__(self, ctx): ctx.literal(self._sql) if self._params: for param in self._params: ctx.value(param, add_param=False) return ctx def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper class Query(BaseQuery): def __init__(self, where=None, order_by=None, limit=None, offset=None, **kwargs): super(Query, self).__init__(**kwargs) self._where = where self._order_by = order_by self._limit = limit self._offset = offset self._cte_list = None @Node.copy def with_cte(self, *cte_list): self._cte_list = cte_list @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def orwhere(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.or_, expressions) @Node.copy def order_by(self, *values): self._order_by = values @Node.copy def 
order_by_extend(self, *values): self._order_by = ((self._order_by or ()) + values) or None @Node.copy def limit(self, value=None): self._limit = value @Node.copy def offset(self, value=None): self._offset = value @Node.copy def paginate(self, page, paginate_by=20): if page > 0: page -= 1 self._limit = paginate_by self._offset = page * paginate_by def _apply_ordering(self, ctx): if self._order_by: (ctx .literal(' ORDER BY ') .sql(CommaNodeList(self._order_by))) if self._limit is not None or (self._offset is not None and ctx.state.limit_max): limit = ctx.state.limit_max if self._limit is None else self._limit ctx.literal(' LIMIT ').sql(limit) if self._offset is not None: ctx.literal(' OFFSET ').sql(self._offset) return ctx def __sql__(self, ctx): if self._cte_list: # The CTE scope is only used at the very beginning of the query, # when we are describing the various CTEs we will be using. recursive = any(cte._recursive for cte in self._cte_list) # Explicitly disable the "subquery" flag here, so as to avoid # unnecessary parentheses around subsequent selects. with ctx.scope_cte(subquery=False): (ctx .literal('WITH RECURSIVE ' if recursive else 'WITH ') .sql(CommaNodeList(self._cte_list)) .literal(' ')) return ctx def __compound_select__(operation, inverted=False): def method(self, other): if inverted: self, other = other, self return CompoundSelectQuery(self, operation, other) return method class SelectQuery(Query): union_all = __add__ = __compound_select__('UNION ALL') union = __or__ = __compound_select__('UNION') intersect = __and__ = __compound_select__('INTERSECT') except_ = __sub__ = __compound_select__('EXCEPT') __radd__ = __compound_select__('UNION ALL', inverted=True) __ror__ = __compound_select__('UNION', inverted=True) __rand__ = __compound_select__('INTERSECT', inverted=True) __rsub__ = __compound_select__('EXCEPT', inverted=True) def select_from(self, *columns): if not columns: raise ValueError('select_from() must specify one or more columns.') query = (Select((self,), columns) .bind(self._database)) if getattr(self, 'model', None) is not None: # Bind to the sub-select's model type, if defined. 
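# Editorial note: binding the wrapping query to the sub-select's model (via
# .objects()) means iterating it still yields model instances rather than
# falling back to plain dicts or tuples.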
query = query.objects(self.model) return query class SelectBase(_HashableSource, Source, SelectQuery): def _get_hash(self): return hash((self.__class__, self._alias or id(self))) def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper @database_required def peek(self, database, n=1): rows = self.execute(database)[:n] if rows: return rows[0] if n == 1 else rows @database_required def first(self, database, n=1): if self._limit != n: self._limit = n self._cursor_wrapper = None return self.peek(database, n=n) @database_required def scalar(self, database, as_tuple=False): row = self.tuples().peek(database) return row[0] if row and not as_tuple else row @database_required def count(self, database, clear_limit=False): clone = self.order_by().alias('_wrapped') if clear_limit: clone._limit = clone._offset = None try: if clone._having is None and clone._group_by is None and \ clone._windows is None and clone._distinct is None and \ clone._simple_distinct is not True: clone = clone.select(SQL('1')) except AttributeError: pass return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database) @database_required def exists(self, database): clone = self.columns(SQL('1')) clone._limit = 1 clone._offset = None return bool(clone.scalar()) @database_required def get(self, database): self._cursor_wrapper = None try: return self.execute(database)[0] except IndexError: pass # QUERY IMPLEMENTATIONS. class CompoundSelectQuery(SelectBase): def __init__(self, lhs, op, rhs): super(CompoundSelectQuery, self).__init__() self.lhs = lhs self.op = op self.rhs = rhs @property def _returning(self): return self.lhs._returning @database_required def exists(self, database): query = Select((self.limit(1),), (SQL('1'),)).bind(database) return bool(query.scalar()) def _get_query_key(self): return (self.lhs.get_query_key(), self.rhs.get_query_key()) def _wrap_parens(self, ctx, subq): csq_setting = ctx.state.compound_select_parentheses if not csq_setting or csq_setting == CSQ_PARENTHESES_NEVER: return False elif csq_setting == CSQ_PARENTHESES_ALWAYS: return True elif csq_setting == CSQ_PARENTHESES_UNNESTED: if ctx.state.in_expr or ctx.state.in_function: # If this compound select query is being used inside an # expression, e.g., an IN or EXISTS(). return False # If the query on the left or right is itself a compound select # query, then we do not apply parentheses. However, if it is a # regular SELECT query, we will apply parentheses. return not isinstance(subq, CompoundSelectQuery) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) # Call parent method to handle any CTEs. super(CompoundSelectQuery, self).__sql__(ctx) outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE) with ctx(parentheses=outer_parens): # Should the left-hand query be wrapped in parentheses? lhs_parens = self._wrap_parens(ctx, self.lhs) with ctx.scope_normal(parentheses=lhs_parens, subquery=False): ctx.sql(self.lhs) ctx.literal(' %s ' % self.op) with ctx.push_alias(): # Should the right-hand query be wrapped in parentheses? rhs_parens = self._wrap_parens(ctx, self.rhs) with ctx.scope_normal(parentheses=rhs_parens, subquery=False): ctx.sql(self.rhs) # Apply ORDER BY, LIMIT, OFFSET. We use the "values" scope so that # entity names are not fully-qualified. This is a bit of a hack, as # we're relying on the logic in Column.__sql__() to not fully # qualify column names. 
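# Editorial example: for (q1 | q2).order_by(users.username), the trailing
# ORDER BY applies to the combined result, so the column renders as a bare
# "username" rather than being qualified with either sub-query's table alias.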
with ctx.scope_values(): self._apply_ordering(ctx) return self.apply_alias(ctx) class Select(SelectBase): def __init__(self, from_list=None, columns=None, group_by=None, having=None, distinct=None, windows=None, for_update=None, for_update_of=None, nowait=None, lateral=None, **kwargs): super(Select, self).__init__(**kwargs) self._from_list = (list(from_list) if isinstance(from_list, tuple) else from_list) or [] self._returning = columns self._group_by = group_by self._having = having self._windows = None self._for_update = for_update # XXX: consider reorganizing. self._for_update_of = for_update_of self._for_update_nowait = nowait self._lateral = lateral self._distinct = self._simple_distinct = None if distinct: if isinstance(distinct, bool): self._simple_distinct = distinct else: self._distinct = distinct self._cursor_wrapper = None def clone(self): clone = super(Select, self).clone() if clone._from_list: clone._from_list = list(clone._from_list) return clone @Node.copy def columns(self, *columns, **kwargs): self._returning = columns select = columns @Node.copy def select_extend(self, *columns): self._returning = tuple(self._returning) + columns @Node.copy def from_(self, *sources): self._from_list = list(sources) @Node.copy def join(self, dest, join_type=JOIN.INNER, on=None): if not self._from_list: raise ValueError('No sources to join on.') item = self._from_list.pop() self._from_list.append(Join(item, dest, join_type, on)) @Node.copy def group_by(self, *columns): grouping = [] for column in columns: if isinstance(column, Table): if not column._columns: raise ValueError('Cannot pass a table to group_by() that ' 'does not have columns explicitly ' 'declared.') grouping.extend([getattr(column, col_name) for col_name in column._columns]) else: grouping.append(column) self._group_by = grouping def group_by_extend(self, *values): """@Node.copy used from group_by() call""" group_by = tuple(self._group_by or ()) + values return self.group_by(*group_by) @Node.copy def having(self, *expressions): if self._having is not None: expressions = (self._having,) + expressions self._having = reduce(operator.and_, expressions) @Node.copy def distinct(self, *columns): if len(columns) == 1 and (columns[0] is True or columns[0] is False): self._simple_distinct = columns[0] else: self._simple_distinct = False self._distinct = columns @Node.copy def window(self, *windows): self._windows = windows if windows else None @Node.copy def for_update(self, for_update=True, of=None, nowait=None): if not for_update and (of is not None or nowait): for_update = True self._for_update = for_update self._for_update_of = of self._for_update_nowait = nowait @Node.copy def lateral(self, lateral=True): self._lateral = lateral def _get_query_key(self): return self._alias def __sql_selection__(self, ctx, is_subquery=False): return ctx.sql(CommaNodeList(self._returning)) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) if self._lateral and ctx.scope == SCOPE_SOURCE: ctx.literal('LATERAL ') is_subquery = ctx.subquery state = { 'converter': None, 'in_function': False, 'parentheses': is_subquery or (ctx.scope == SCOPE_SOURCE), 'subquery': True, } if ctx.state.in_function and ctx.state.function_arg_count == 1: state['parentheses'] = False with ctx.scope_normal(**state): # Defer calling parent SQL until here. This ensures that any CTEs # for this query will be properly nested if this query is a # sub-select or is used in an expression. See GH#1809 for example. 
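# Editorial note: emitting the parent's WITH ... CTE prefix inside this scope
# ensures that when this SELECT is itself a sub-query, its CTE list stays
# inside the sub-query's parentheses instead of leaking into the outer query.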
super(Select, self).__sql__(ctx) ctx.literal('SELECT ') if self._simple_distinct or self._distinct is not None: ctx.literal('DISTINCT ') if self._distinct: (ctx .literal('ON ') .sql(EnclosedNodeList(self._distinct)) .literal(' ')) with ctx.scope_source(): ctx = self.__sql_selection__(ctx, is_subquery) if self._from_list: with ctx.scope_source(parentheses=False): ctx.literal(' FROM ').sql(CommaNodeList(self._from_list)) if self._where is not None: ctx.literal(' WHERE ').sql(self._where) if self._group_by: ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by)) if self._having is not None: ctx.literal(' HAVING ').sql(self._having) if self._windows is not None: ctx.literal(' WINDOW ') ctx.sql(CommaNodeList(self._windows)) # Apply ORDER BY, LIMIT, OFFSET. self._apply_ordering(ctx) if self._for_update: if not ctx.state.for_update: raise ValueError('FOR UPDATE specified but not supported ' 'by database.') ctx.literal(' ') ctx.sql(ForUpdate(self._for_update, self._for_update_of, self._for_update_nowait)) # If the subquery is inside a function -or- we are evaluating a # subquery on either side of an expression w/o an explicit alias, do # not generate an alias + AS clause. if ctx.state.in_function or (ctx.state.in_expr and self._alias is None): return ctx return self.apply_alias(ctx) class _WriteQuery(Query): def __init__(self, table, returning=None, **kwargs): self.table = table self._returning = returning self._return_cursor = True if returning else False super(_WriteQuery, self).__init__(**kwargs) @Node.copy def returning(self, *returning): self._returning = returning self._return_cursor = True if returning else False def apply_returning(self, ctx): if self._returning: with ctx.scope_source(): ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning)) return ctx def _execute(self, database): if self._returning: cursor = self.execute_returning(database) else: cursor = database.execute(self) return self.handle_result(database, cursor) def execute_returning(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper def handle_result(self, database, cursor): if self._return_cursor: return cursor return database.rows_affected(cursor) def _set_table_alias(self, ctx): ctx.alias_manager[self.table] = self.table.__name__ def __sql__(self, ctx): super(_WriteQuery, self).__sql__(ctx) # We explicitly set the table alias to the table's name, which ensures # that if a sub-select references a column on the outer table, we won't # assign it a new alias (e.g. t2) but will refer to it as table.column. self._set_table_alias(ctx) return ctx class Update(_WriteQuery): def __init__(self, table, update=None, **kwargs): super(Update, self).__init__(table, **kwargs) self._update = update self._from = None @Node.copy def from_(self, *sources): self._from = sources def __sql__(self, ctx): super(Update, self).__sql__(ctx) with ctx.scope_values(subquery=True): ctx.literal('UPDATE ') expressions = [] for k, v in sorted(self._update.items(), key=ctx.column_sort_key): if not isinstance(v, Node): if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) elif isinstance(v, Model) and isinstance(k, ForeignKeyField): # NB: we want to ensure that when passed a model instance # in the context of a foreign-key, we apply the fk-specific # adaptation of the model. 
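# Editorial note (illustrative, assuming hypothetical Tweet/User models):
# updating a Tweet's "user" foreign key with a User instance should store the
# user's primary-key value in the column; to_value() applies that conversion.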
v = k.to_value(v) if not isinstance(v, Value): v = qualify_names(v) expressions.append(NodeList((k, SQL('='), v))) (ctx .sql(self.table) .literal(' SET ') .sql(CommaNodeList(expressions))) if self._from: with ctx.scope_source(parentheses=False): ctx.literal(' FROM ').sql(CommaNodeList(self._from)) if self._where: with ctx.scope_normal(): ctx.literal(' WHERE ').sql(self._where) self._apply_ordering(ctx) return self.apply_returning(ctx) class Insert(_WriteQuery): SIMPLE = 0 QUERY = 1 MULTI = 2 class DefaultValuesException(Exception): pass def __init__(self, table, insert=None, columns=None, on_conflict=None, **kwargs): super(Insert, self).__init__(table, **kwargs) self._insert = insert self._columns = columns self._on_conflict = on_conflict self._query_type = None def where(self, *expressions): raise NotImplementedError('INSERT queries cannot have a WHERE clause.') @Node.copy def on_conflict_ignore(self, ignore=True): self._on_conflict = OnConflict('IGNORE') if ignore else None @Node.copy def on_conflict_replace(self, replace=True): self._on_conflict = OnConflict('REPLACE') if replace else None @Node.copy def on_conflict(self, *args, **kwargs): self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs) else None) def _simple_insert(self, ctx): if not self._insert: raise self.DefaultValuesException('Error: no data to insert.') return self._generate_insert((self._insert,), ctx) def get_default_data(self): return {} def get_default_columns(self): if self.table._columns: return [getattr(self.table, col) for col in self.table._columns if col != self.table._primary_key] def _generate_insert(self, insert, ctx): rows_iter = iter(insert) columns = self._columns # Load and organize column defaults (if provided). defaults = self.get_default_data() # First figure out what columns are being inserted (if they weren't # specified explicitly). Resulting columns are normalized and ordered. if not columns: try: row = next(rows_iter) except StopIteration: raise self.DefaultValuesException('Error: no rows to insert.') if not isinstance(row, Mapping): columns = self.get_default_columns() if columns is None: raise ValueError('Bulk insert must specify columns.') else: # Infer column names from the dict of data being inserted. accum = [] for column in row: if isinstance(column, basestring): column = getattr(self.table, column) accum.append(column) # Add any columns present in the default data that are not # accounted for by the dictionary of row data. 
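# Editorial example (hypothetical column name): if get_default_data() supplies
# a value for a "created" column that none of the row dicts mention, "created"
# is appended to the column list here so every generated row includes it.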
column_set = set(accum) for col in (set(defaults) - column_set): accum.append(col) columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx)) rows_iter = itertools.chain(iter((row,)), rows_iter) else: clean_columns = [] seen = set() for column in columns: if isinstance(column, basestring): column_obj = getattr(self.table, column) else: column_obj = column clean_columns.append(column_obj) seen.add(column_obj) columns = clean_columns for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)): if col not in seen: columns.append(col) fk_fields = set() nullable_columns = set() value_lookups = {} for column in columns: lookups = [column, column.name] if isinstance(column, Field): if column.name != column.column_name: lookups.append(column.column_name) if column.null: nullable_columns.add(column) if isinstance(column, ForeignKeyField): fk_fields.add(column) value_lookups[column] = lookups ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ') columns_converters = [ (column, column.db_value if isinstance(column, Field) else None) for column in columns] all_values = [] for row in rows_iter: values = [] is_dict = isinstance(row, Mapping) for i, (column, converter) in enumerate(columns_converters): try: if is_dict: # The logic is a bit convoluted, but in order to be # flexible in what we accept (dict keyed by # column/field, field name, or underlying column name), # we try accessing the row data dict using each # possible key. If no match is found, throw an error. for lookup in value_lookups[column]: try: val = row[lookup] except KeyError: pass else: break else: raise KeyError else: val = row[i] except (KeyError, IndexError): if column in defaults: val = defaults[column] if callable_(val): val = val() elif column in nullable_columns: val = None else: raise ValueError('Missing value for %s.' 
% column.name) if not isinstance(val, Node) or (isinstance(val, Model) and column in fk_fields): val = Value(val, converter=converter, unpack=False) values.append(val) all_values.append(EnclosedNodeList(values)) if not all_values: raise self.DefaultValuesException('Error: no data to insert.') with ctx.scope_values(subquery=True): return ctx.sql(CommaNodeList(all_values)) def _query_insert(self, ctx): return (ctx .sql(EnclosedNodeList(self._columns)) .literal(' ') .sql(self._insert)) def _default_values(self, ctx): if not self._database: return ctx.literal('DEFAULT VALUES') return self._database.default_values_insert(ctx) def __sql__(self, ctx): super(Insert, self).__sql__(ctx) with ctx.scope_values(): stmt = None if self._on_conflict is not None: stmt = self._on_conflict.get_conflict_statement(ctx, self) (ctx .sql(stmt or SQL('INSERT')) .literal(' INTO ') .sql(self.table) .literal(' ')) if isinstance(self._insert, Mapping) and not self._columns: try: self._simple_insert(ctx) except self.DefaultValuesException: self._default_values(ctx) self._query_type = Insert.SIMPLE elif isinstance(self._insert, (SelectQuery, SQL)): self._query_insert(ctx) self._query_type = Insert.QUERY else: self._generate_insert(self._insert, ctx) self._query_type = Insert.MULTI if self._on_conflict is not None: update = self._on_conflict.get_conflict_update(ctx, self) if update is not None: ctx.literal(' ').sql(update) return self.apply_returning(ctx) def _execute(self, database): if self._returning is None and database.returning_clause \ and self.table._primary_key: self._returning = (self.table._primary_key,) try: return super(Insert, self)._execute(database) except self.DefaultValuesException: pass def handle_result(self, database, cursor): if self._return_cursor: return cursor if self._query_type != Insert.SIMPLE and not self._returning: return database.rows_affected(cursor) return database.last_insert_id(cursor, self._query_type) class Delete(_WriteQuery): def __sql__(self, ctx): super(Delete, self).__sql__(ctx) with ctx.scope_values(subquery=True): ctx.literal('DELETE FROM ').sql(self.table) if self._where is not None: with ctx.scope_normal(): ctx.literal(' WHERE ').sql(self._where) self._apply_ordering(ctx) return self.apply_returning(ctx) class Index(Node): def __init__(self, name, table, expressions, unique=False, safe=False, where=None, using=None): self._name = name self._table = Entity(table) if not isinstance(table, Table) else table self._expressions = expressions self._where = where self._unique = unique self._safe = safe self._using = using @Node.copy def safe(self, _safe=True): self._safe = _safe @Node.copy def where(self, *expressions): if self._where is not None: expressions = (self._where,) + expressions self._where = reduce(operator.and_, expressions) @Node.copy def using(self, _using=None): self._using = _using def __sql__(self, ctx): statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX ' with ctx.scope_values(subquery=True): ctx.literal(statement) if self._safe: ctx.literal('IF NOT EXISTS ') # Sqlite uses CREATE INDEX <schema>.<name> ON <table>, whereas most # others use: CREATE INDEX <name> ON <schema>.<table>. 
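# Editorial example (illustrative, hypothetical names; identifier quoting
# varies by backend): SQLite renders
#     CREATE INDEX "other"."idx_users_name" ON "users" (...)
# whereas most other backends render
#     CREATE INDEX "idx_users_name" ON "other"."users" (...)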
if ctx.state.index_schema_prefix and \ isinstance(self._table, Table) and self._table._schema: index_name = Entity(self._table._schema, self._name) table_name = Entity(self._table.__name__) else: index_name = Entity(self._name) table_name = self._table ctx.sql(index_name) if self._using is not None and \ ctx.state.index_using_precedes_table: ctx.literal(' USING %s' % self._using) # MySQL style. (ctx .literal(' ON ') .sql(table_name) .literal(' ')) if self._using is not None and not \ ctx.state.index_using_precedes_table: ctx.literal('USING %s ' % self._using) # Postgres/default. ctx.sql(EnclosedNodeList([ SQL(expr) if isinstance(expr, basestring) else expr for expr in self._expressions])) if self._where is not None: ctx.literal(' WHERE ').sql(self._where) return ctx class ModelIndex(Index): def __init__(self, model, fields, unique=False, safe=True, where=None, using=None, name=None): self._model = model if name is None: name = self._generate_name_from_fields(model, fields) if using is None: for field in fields: if isinstance(field, Field) and hasattr(field, 'index_type'): using = field.index_type super(ModelIndex, self).__init__( name=name, table=model._meta.table, expressions=fields, unique=unique, safe=safe, where=where, using=using) def _generate_name_from_fields(self, model, fields): accum = [] for field in fields: if isinstance(field, basestring): accum.append(field.split()[0]) else: if isinstance(field, Node) and not isinstance(field, Field): field = field.unwrap() if isinstance(field, Field): accum.append(field.column_name) if not accum: raise ValueError('Unable to generate a name for the index, please ' 'explicitly specify a name.') clean_field_names = re.sub(r'[^\w]+', '', '_'.join(accum)) meta = model._meta prefix = meta.name if meta.legacy_table_names else meta.table_name return _truncate_constraint_name('_'.join((prefix, clean_field_names))) def _truncate_constraint_name(constraint, maxlen=64): if len(constraint) > maxlen: name_hash = hashlib.md5(constraint.encode('utf-8')).hexdigest() constraint = '%s_%s' % (constraint[:(maxlen - 8)], name_hash[:7]) return constraint # DB-API 2.0 EXCEPTIONS. class PeeweeException(Exception): def __init__(self, *args): if args and isinstance(args[0], Exception): self.orig, args = args[0], args[1:] super(PeeweeException, self).__init__(*args) class ImproperlyConfigured(PeeweeException): pass class DatabaseError(PeeweeException): pass class DataError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InterfaceError(PeeweeException): pass class InternalError(DatabaseError): pass class NotSupportedError(DatabaseError): pass class OperationalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class ExceptionWrapper(object): __slots__ = ('exceptions',) def __init__(self, exceptions): self.exceptions = exceptions def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: return # psycopg2.8 shits out a million cute error types. Try to catch em all. 
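# Editorial note (hedged, relies on psycopg2's class hierarchy): e.g. a
# UniqueViolation subclasses psycopg2's IntegrityError, so collapsing to the
# first base class lets the generic name be found in the EXCEPTIONS map below.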
if pg_errors is not None and exc_type.__name__ not in self.exceptions \ and issubclass(exc_type, pg_errors.Error): exc_type = exc_type.__bases__[0] if exc_type.__name__ in self.exceptions: new_type = self.exceptions[exc_type.__name__] exc_args = exc_value.args reraise(new_type, new_type(exc_value, *exc_args), traceback) EXCEPTIONS = { 'ConstraintError': IntegrityError, 'DatabaseError': DatabaseError, 'DataError': DataError, 'IntegrityError': IntegrityError, 'InterfaceError': InterfaceError, 'InternalError': InternalError, 'NotSupportedError': NotSupportedError, 'OperationalError': OperationalError, 'ProgrammingError': ProgrammingError, 'TransactionRollbackError': OperationalError} __exception_wrapper__ = ExceptionWrapper(EXCEPTIONS) # DATABASE INTERFACE AND CONNECTION MANAGEMENT. IndexMetadata = collections.namedtuple( 'IndexMetadata', ('name', 'sql', 'columns', 'unique', 'table')) ColumnMetadata = collections.namedtuple( 'ColumnMetadata', ('name', 'data_type', 'null', 'primary_key', 'table', 'default')) ForeignKeyMetadata = collections.namedtuple( 'ForeignKeyMetadata', ('column', 'dest_table', 'dest_column', 'table')) ViewMetadata = collections.namedtuple('ViewMetadata', ('name', 'sql')) class _ConnectionState(object): def __init__(self, **kwargs): super(_ConnectionState, self).__init__(**kwargs) self.reset() def reset(self): self.closed = True self.conn = None self.ctx = [] self.transactions = [] def set_connection(self, conn): self.conn = conn self.closed = False self.ctx = [] self.transactions = [] class _ConnectionLocal(_ConnectionState, threading.local): pass class _NoopLock(object): __slots__ = () def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class ConnectionContext(_callable_context_manager): __slots__ = ('db',) def __init__(self, db): self.db = db def __enter__(self): if self.db.is_closed(): self.db.connect() def __exit__(self, exc_type, exc_val, exc_tb): self.db.close() class Database(_callable_context_manager): context_class = Context field_types = {} operations = {} param = '?' quote = '""' server_version = None # Feature toggles. commit_select = False compound_select_parentheses = CSQ_PARENTHESES_NEVER for_update = False index_schema_prefix = False index_using_precedes_table = False limit_max = None nulls_ordering = False returning_clause = False safe_create_index = True safe_drop_index = True sequences = False truncate_table = True def __init__(self, database, thread_safe=True, autorollback=False, field_types=None, operations=None, autocommit=None, autoconnect=True, **kwargs): self._field_types = merge_dict(FIELD, self.field_types) self._operations = merge_dict(OP, self.operations) if field_types: self._field_types.update(field_types) if operations: self._operations.update(operations) self.autoconnect = autoconnect self.autorollback = autorollback self.thread_safe = thread_safe if thread_safe: self._state = _ConnectionLocal() self._lock = threading.RLock() else: self._state = _ConnectionState() self._lock = _NoopLock() if autocommit is not None: __deprecated__('Peewee no longer uses the "autocommit" option, as ' 'the semantics now require it to always be True. 
' 'Because some database-drivers also use the ' '"autocommit" parameter, you are receiving a ' 'warning so you may update your code and remove ' 'the parameter, as in the future, specifying ' 'autocommit could impact the behavior of the ' 'database driver you are using.') self.connect_params = {} self.init(database, **kwargs) def init(self, database, **kwargs): if not self.is_closed(): self.close() self.database = database self.connect_params.update(kwargs) self.deferred = not bool(database) def __enter__(self): if self.is_closed(): self.connect() ctx = self.atomic() self._state.ctx.append(ctx) ctx.__enter__() return self def __exit__(self, exc_type, exc_val, exc_tb): ctx = self._state.ctx.pop() try: ctx.__exit__(exc_type, exc_val, exc_tb) finally: if not self._state.ctx: self.close() def connection_context(self): return ConnectionContext(self) def _connect(self): raise NotImplementedError def connect(self, reuse_if_open=False): with self._lock: if self.deferred: raise InterfaceError('Error, database must be initialized ' 'before opening a connection.') if not self._state.closed: if reuse_if_open: return False raise OperationalError('Connection already opened.') self._state.reset() with __exception_wrapper__: self._state.set_connection(self._connect()) if self.server_version is None: self._set_server_version(self._state.conn) self._initialize_connection(self._state.conn) return True def _initialize_connection(self, conn): pass def _set_server_version(self, conn): self.server_version = 0 def close(self): with self._lock: if self.deferred: raise InterfaceError('Error, database must be initialized ' 'before opening a connection.') if self.in_transaction(): raise OperationalError('Attempting to close database while ' 'transaction is open.') is_open = not self._state.closed try: if is_open: with __exception_wrapper__: self._close(self._state.conn) finally: self._state.reset() return is_open def _close(self, conn): conn.close() def is_closed(self): return self._state.closed def is_connection_usable(self): return not self._state.closed def connection(self): if self.is_closed(): self.connect() return self._state.conn def cursor(self, commit=None): if self.is_closed(): if self.autoconnect: self.connect() else: raise InterfaceError('Error, database connection not opened.') return self._state.conn.cursor() def execute_sql(self, sql, params=None, commit=SENTINEL): logger.debug((sql, params)) if commit is SENTINEL: if self.in_transaction(): commit = False elif self.commit_select: commit = True else: commit = not sql[:6].lower().startswith('select') with __exception_wrapper__: cursor = self.cursor(commit) try: cursor.execute(sql, params or ()) except Exception: if self.autorollback and not self.in_transaction(): self.rollback() raise else: if commit and not self.in_transaction(): self.commit() return cursor def execute(self, query, commit=SENTINEL, **context_options): ctx = self.get_sql_context(**context_options) sql, params = ctx.sql(query).query() return self.execute_sql(sql, params, commit=commit) def get_context_options(self): return { 'field_types': self._field_types, 'operations': self._operations, 'param': self.param, 'quote': self.quote, 'compound_select_parentheses': self.compound_select_parentheses, 'conflict_statement': self.conflict_statement, 'conflict_update': self.conflict_update, 'for_update': self.for_update, 'index_schema_prefix': self.index_schema_prefix, 'index_using_precedes_table': self.index_using_precedes_table, 'limit_max': self.limit_max, 'nulls_ordering': self.nulls_ordering, 
} def get_sql_context(self, **context_options): context = self.get_context_options() if context_options: context.update(context_options) return self.context_class(**context) def conflict_statement(self, on_conflict, query): raise NotImplementedError def conflict_update(self, on_conflict, query): raise NotImplementedError def _build_on_conflict_update(self, on_conflict, query): if on_conflict._conflict_target: stmt = SQL('ON CONFLICT') target = EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in on_conflict._conflict_target]) if on_conflict._conflict_where is not None: target = NodeList([target, SQL('WHERE'), on_conflict._conflict_where]) else: stmt = SQL('ON CONFLICT ON CONSTRAINT') target = on_conflict._conflict_constraint if isinstance(target, basestring): target = Entity(target) updates = [] if on_conflict._preserve: for column in on_conflict._preserve: excluded = NodeList((SQL('EXCLUDED'), ensure_entity(column)), glue='.') expression = NodeList((ensure_entity(column), SQL('='), excluded)) updates.append(expression) if on_conflict._update: for k, v in on_conflict._update.items(): if not isinstance(v, Node): # Attempt to resolve string field-names to their respective # field object, to apply data-type conversions. if isinstance(k, basestring): k = getattr(query.table, k) if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) else: v = QualifiedNames(v) updates.append(NodeList((ensure_entity(k), SQL('='), v))) parts = [stmt, target, SQL('DO UPDATE SET'), CommaNodeList(updates)] if on_conflict._where: parts.extend((SQL('WHERE'), QualifiedNames(on_conflict._where))) return NodeList(parts) def last_insert_id(self, cursor, query_type=None): return cursor.lastrowid def rows_affected(self, cursor): return cursor.rowcount def default_values_insert(self, ctx): return ctx.literal('DEFAULT VALUES') def session_start(self): with self._lock: return self.transaction().__enter__() def session_commit(self): with self._lock: try: txn = self.pop_transaction() except IndexError: return False txn.commit(begin=self.in_transaction()) return True def session_rollback(self): with self._lock: try: txn = self.pop_transaction() except IndexError: return False txn.rollback(begin=self.in_transaction()) return True def in_transaction(self): return bool(self._state.transactions) def push_transaction(self, transaction): self._state.transactions.append(transaction) def pop_transaction(self): return self._state.transactions.pop() def transaction_depth(self): return len(self._state.transactions) def top_transaction(self): if self._state.transactions: return self._state.transactions[-1] def atomic(self, *args, **kwargs): return _atomic(self, *args, **kwargs) def manual_commit(self): return _manual(self) def transaction(self, *args, **kwargs): return _transaction(self, *args, **kwargs) def savepoint(self): return _savepoint(self) def begin(self): if self.is_closed(): self.connect() def commit(self): with __exception_wrapper__: return self._state.conn.commit() def rollback(self): with __exception_wrapper__: return self._state.conn.rollback() def batch_commit(self, it, n): for group in chunked(it, n): with self.atomic(): for obj in group: yield obj def table_exists(self, table_name, schema=None): return table_name in self.get_tables(schema=schema) def get_tables(self, schema=None): raise NotImplementedError def get_indexes(self, table, schema=None): raise NotImplementedError def get_columns(self, table, schema=None): raise NotImplementedError def get_primary_keys(self, table, 
schema=None): raise NotImplementedError def get_foreign_keys(self, table, schema=None): raise NotImplementedError def sequence_exists(self, seq): raise NotImplementedError def create_tables(self, models, **options): for model in sort_models(models): model.create_table(**options) def drop_tables(self, models, **kwargs): for model in reversed(sort_models(models)): model.drop_table(**kwargs) def extract_date(self, date_part, date_field): raise NotImplementedError def truncate_date(self, date_part, date_field): raise NotImplementedError def to_timestamp(self, date_field): raise NotImplementedError def from_timestamp(self, date_field): raise NotImplementedError def random(self): return fn.random() def bind(self, models, bind_refs=True, bind_backrefs=True): for model in models: model.bind(self, bind_refs=bind_refs, bind_backrefs=bind_backrefs) def bind_ctx(self, models, bind_refs=True, bind_backrefs=True): return _BoundModelsContext(models, self, bind_refs, bind_backrefs) def get_noop_select(self, ctx): return ctx.sql(Select().columns(SQL('0')).where(SQL('0'))) def __pragma__(name): def __get__(self): return self.pragma(name) def __set__(self, value): return self.pragma(name, value) return property(__get__, __set__) class SqliteDatabase(Database): field_types = { 'BIGAUTO': FIELD.AUTO, 'BIGINT': FIELD.INT, 'BOOL': FIELD.INT, 'DOUBLE': FIELD.FLOAT, 'SMALLINT': FIELD.INT, 'UUID': FIELD.TEXT} operations = { 'LIKE': 'GLOB', 'ILIKE': 'LIKE'} index_schema_prefix = True limit_max = -1 server_version = __sqlite_version__ truncate_table = False def __init__(self, database, *args, **kwargs): self._pragmas = kwargs.pop('pragmas', ()) super(SqliteDatabase, self).__init__(database, *args, **kwargs) self._aggregates = {} self._collations = {} self._functions = {} self._window_functions = {} self._table_functions = [] self._extensions = set() self._attached = {} self.register_function(_sqlite_date_part, 'date_part', 2) self.register_function(_sqlite_date_trunc, 'date_trunc', 2) self.nulls_ordering = self.server_version >= (3, 30, 0) def init(self, database, pragmas=None, timeout=5, **kwargs): if pragmas is not None: self._pragmas = pragmas if isinstance(self._pragmas, dict): self._pragmas = list(self._pragmas.items()) self._timeout = timeout super(SqliteDatabase, self).init(database, **kwargs) def _set_server_version(self, conn): pass def _connect(self): if sqlite3 is None: raise ImproperlyConfigured('SQLite driver not installed!') conn = sqlite3.connect(self.database, timeout=self._timeout, isolation_level=None, **self.connect_params) try: self._add_conn_hooks(conn) except: conn.close() raise return conn def _add_conn_hooks(self, conn): if self._attached: self._attach_databases(conn) if self._pragmas: self._set_pragmas(conn) self._load_aggregates(conn) self._load_collations(conn) self._load_functions(conn) if self.server_version >= (3, 25, 0): self._load_window_functions(conn) if self._table_functions: for table_function in self._table_functions: table_function.register(conn) if self._extensions: self._load_extensions(conn) def _set_pragmas(self, conn): cursor = conn.cursor() for pragma, value in self._pragmas: cursor.execute('PRAGMA %s = %s;' % (pragma, value)) cursor.close() def _attach_databases(self, conn): cursor = conn.cursor() for name, db in self._attached.items(): cursor.execute('ATTACH DATABASE "%s" AS "%s"' % (db, name)) cursor.close() def pragma(self, key, value=SENTINEL, permanent=False, schema=None): if schema is not None: key = '"%s".%s' % (schema, key) sql = 'PRAGMA %s' % key if value is not 
SENTINEL: sql += ' = %s' % (value or 0) if permanent: pragmas = dict(self._pragmas or ()) pragmas[key] = value self._pragmas = list(pragmas.items()) elif permanent: raise ValueError('Cannot specify a permanent pragma without value') row = self.execute_sql(sql).fetchone() if row: return row[0] cache_size = __pragma__('cache_size') foreign_keys = __pragma__('foreign_keys') journal_mode = __pragma__('journal_mode') journal_size_limit = __pragma__('journal_size_limit') mmap_size = __pragma__('mmap_size') page_size = __pragma__('page_size') read_uncommitted = __pragma__('read_uncommitted') synchronous = __pragma__('synchronous') wal_autocheckpoint = __pragma__('wal_autocheckpoint') @property def timeout(self): return self._timeout @timeout.setter def timeout(self, seconds): if self._timeout == seconds: return self._timeout = seconds if not self.is_closed(): # PySQLite multiplies user timeout by 1000, but the unit of the # timeout PRAGMA is actually milliseconds. self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000)) def _load_aggregates(self, conn): for name, (klass, num_params) in self._aggregates.items(): conn.create_aggregate(name, num_params, klass) def _load_collations(self, conn): for name, fn in self._collations.items(): conn.create_collation(name, fn) def _load_functions(self, conn): for name, (fn, num_params) in self._functions.items(): conn.create_function(name, num_params, fn) def _load_window_functions(self, conn): for name, (klass, num_params) in self._window_functions.items(): conn.create_window_function(name, num_params, klass) def register_aggregate(self, klass, name=None, num_params=-1): self._aggregates[name or klass.__name__.lower()] = (klass, num_params) if not self.is_closed(): self._load_aggregates(self.connection()) def aggregate(self, name=None, num_params=-1): def decorator(klass): self.register_aggregate(klass, name, num_params) return klass return decorator def register_collation(self, fn, name=None): name = name or fn.__name__ def _collation(*args): expressions = args + (SQL('collate %s' % name),) return NodeList(expressions) fn.collation = _collation self._collations[name] = fn if not self.is_closed(): self._load_collations(self.connection()) def collation(self, name=None): def decorator(fn): self.register_collation(fn, name) return fn return decorator def register_function(self, fn, name=None, num_params=-1): self._functions[name or fn.__name__] = (fn, num_params) if not self.is_closed(): self._load_functions(self.connection()) def func(self, name=None, num_params=-1): def decorator(fn): self.register_function(fn, name, num_params) return fn return decorator def register_window_function(self, klass, name=None, num_params=-1): name = name or klass.__name__.lower() self._window_functions[name] = (klass, num_params) if not self.is_closed(): self._load_window_functions(self.connection()) def window_function(self, name=None, num_params=-1): def decorator(klass): self.register_window_function(klass, name, num_params) return klass return decorator def register_table_function(self, klass, name=None): if name is not None: klass.name = name self._table_functions.append(klass) if not self.is_closed(): klass.register(self.connection()) def table_function(self, name=None): def decorator(klass): self.register_table_function(klass, name) return klass return decorator def unregister_aggregate(self, name): del(self._aggregates[name]) def unregister_collation(self, name): del(self._collations[name]) def unregister_function(self, name): del(self._functions[name]) def 
unregister_window_function(self, name): del(self._window_functions[name]) def unregister_table_function(self, name): for idx, klass in enumerate(self._table_functions): if klass.name == name: break else: return False self._table_functions.pop(idx) return True def _load_extensions(self, conn): conn.enable_load_extension(True) for extension in self._extensions: conn.load_extension(extension) def load_extension(self, extension): self._extensions.add(extension) if not self.is_closed(): conn = self.connection() conn.enable_load_extension(True) conn.load_extension(extension) def unload_extension(self, extension): self._extensions.remove(extension) def attach(self, filename, name): if name in self._attached: if self._attached[name] == filename: return False raise OperationalError('schema "%s" already attached.' % name) self._attached[name] = filename if not self.is_closed(): self.execute_sql('ATTACH DATABASE "%s" AS "%s"' % (filename, name)) return True def detach(self, name): if name not in self._attached: return False del self._attached[name] if not self.is_closed(): self.execute_sql('DETACH DATABASE "%s"' % name) return True def begin(self, lock_type=None): statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN' self.execute_sql(statement, commit=False) def get_tables(self, schema=None): schema = schema or 'main' cursor = self.execute_sql('SELECT name FROM "%s".sqlite_master WHERE ' 'type=? ORDER BY name' % schema, ('table',)) return [row for row, in cursor.fetchall()] def get_views(self, schema=None): sql = ('SELECT name, sql FROM "%s".sqlite_master WHERE type=? ' 'ORDER BY name') % (schema or 'main') return [ViewMetadata(*row) for row in self.execute_sql(sql, ('view',))] def get_indexes(self, table, schema=None): schema = schema or 'main' query = ('SELECT name, sql FROM "%s".sqlite_master ' 'WHERE tbl_name = ? AND type = ? ORDER BY name') % schema cursor = self.execute_sql(query, (table, 'index')) index_to_sql = dict(cursor.fetchall()) # Determine which indexes have a unique constraint. unique_indexes = set() cursor = self.execute_sql('PRAGMA "%s".index_list("%s")' % (schema, table)) for row in cursor.fetchall(): name = row[1] is_unique = int(row[2]) == 1 if is_unique: unique_indexes.add(name) # Retrieve the indexed columns. 
index_columns = {} for index_name in sorted(index_to_sql): cursor = self.execute_sql('PRAGMA "%s".index_info("%s")' % (schema, index_name)) index_columns[index_name] = [row[2] for row in cursor.fetchall()] return [ IndexMetadata( name, index_to_sql[name], index_columns[name], name in unique_indexes, table) for name in sorted(index_to_sql)] def get_columns(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' % (schema or 'main', table)) return [ColumnMetadata(r[1], r[2], not r[3], bool(r[5]), table, r[4]) for r in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' % (schema or 'main', table)) return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())] def get_foreign_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA "%s".foreign_key_list("%s")' % (schema or 'main', table)) return [ForeignKeyMetadata(row[3], row[2], row[4], table) for row in cursor.fetchall()] def get_binary_type(self): return sqlite3.Binary def conflict_statement(self, on_conflict, query): action = on_conflict._action.lower() if on_conflict._action else '' if action and action not in ('nothing', 'update'): return SQL('INSERT OR %s' % on_conflict._action.upper()) def conflict_update(self, oc, query): # Sqlite prior to 3.24.0 does not support Postgres-style upsert. if self.server_version < (3, 24, 0) and \ any((oc._preserve, oc._update, oc._where, oc._conflict_target, oc._conflict_constraint)): raise ValueError('SQLite does not support specifying which values ' 'to preserve or update.') action = oc._action.lower() if oc._action else '' if action and action not in ('nothing', 'update', ''): return if action == 'nothing': return SQL('ON CONFLICT DO NOTHING') elif not oc._update and not oc._preserve: raise ValueError('If you are not performing any updates (or ' 'preserving any INSERTed values), then the ' 'conflict resolution action should be set to ' '"NOTHING".') elif oc._conflict_constraint: raise ValueError('SQLite does not support specifying named ' 'constraints for conflict resolution.') elif not oc._conflict_target: raise ValueError('SQLite requires that a conflict target be ' 'specified when doing an upsert.') return self._build_on_conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.date_part(date_part, date_field, python_value=int) def truncate_date(self, date_part, date_field): return fn.date_trunc(date_part, date_field, python_value=simple_date_time) def to_timestamp(self, date_field): return fn.strftime('%s', date_field).cast('integer') def from_timestamp(self, date_field): return fn.datetime(date_field, 'unixepoch') class PostgresqlDatabase(Database): field_types = { 'AUTO': 'SERIAL', 'BIGAUTO': 'BIGSERIAL', 'BLOB': 'BYTEA', 'BOOL': 'BOOLEAN', 'DATETIME': 'TIMESTAMP', 'DECIMAL': 'NUMERIC', 'DOUBLE': 'DOUBLE PRECISION', 'UUID': 'UUID', 'UUIDB': 'BYTEA'} operations = {'REGEXP': '~', 'IREGEXP': '~*'} param = '%s' commit_select = True compound_select_parentheses = CSQ_PARENTHESES_ALWAYS for_update = True nulls_ordering = True returning_clause = True safe_create_index = False sequences = True def init(self, database, register_unicode=True, encoding=None, isolation_level=None, **kwargs): self._register_unicode = register_unicode self._encoding = encoding self._isolation_level = isolation_level super(PostgresqlDatabase, self).init(database, **kwargs) def _connect(self): if psycopg2 is None: raise ImproperlyConfigured('Postgres driver not installed!') conn = 
psycopg2.connect(database=self.database, **self.connect_params) if self._register_unicode: pg_extensions.register_type(pg_extensions.UNICODE, conn) pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn) if self._encoding: conn.set_client_encoding(self._encoding) if self._isolation_level: conn.set_isolation_level(self._isolation_level) return conn def _set_server_version(self, conn): self.server_version = conn.server_version if self.server_version >= 90600: self.safe_create_index = True def is_connection_usable(self): if self._state.closed: return False # Returns True if we are idle, running a command, or in an active # connection. If the connection is in an error state or the connection # is otherwise unusable, return False. txn_status = self._state.conn.get_transaction_status() return txn_status < pg_extensions.TRANSACTION_STATUS_INERROR def last_insert_id(self, cursor, query_type=None): try: return cursor if query_type != Insert.SIMPLE else cursor[0][0] except (IndexError, KeyError, TypeError): pass def get_tables(self, schema=None): query = ('SELECT tablename FROM pg_catalog.pg_tables ' 'WHERE schemaname = %s ORDER BY tablename') cursor = self.execute_sql(query, (schema or 'public',)) return [table for table, in cursor.fetchall()] def get_views(self, schema=None): query = ('SELECT viewname, definition FROM pg_catalog.pg_views ' 'WHERE schemaname = %s ORDER BY viewname') cursor = self.execute_sql(query, (schema or 'public',)) return [ViewMetadata(view_name, sql.strip(' \t;')) for (view_name, sql) in cursor.fetchall()] def get_indexes(self, table, schema=None): query = """ SELECT i.relname, idxs.indexdef, idx.indisunique, array_to_string(ARRAY( SELECT pg_get_indexdef(idx.indexrelid, k + 1, TRUE) FROM generate_subscripts(idx.indkey, 1) AS k ORDER BY k), ',') FROM pg_catalog.pg_class AS t INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid INNER JOIN pg_catalog.pg_indexes AS idxs ON (idxs.tablename = t.relname AND idxs.indexname = i.relname) WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s ORDER BY idx.indisunique DESC, i.relname;""" cursor = self.execute_sql(query, (table, 'r', schema or 'public')) return [IndexMetadata(name, sql.rstrip(' ;'), columns.split(','), is_unique, table) for name, sql, is_unique, columns in cursor.fetchall()] def get_columns(self, table, schema=None): query = """ SELECT column_name, is_nullable, data_type, column_default FROM information_schema.columns WHERE table_name = %s AND table_schema = %s ORDER BY ordinal_position""" cursor = self.execute_sql(query, (table, schema or 'public')) pks = set(self.get_primary_keys(table, schema)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df) for name, null, dt, df in cursor.fetchall()] def get_primary_keys(self, table, schema=None): query = """ SELECT kc.column_name FROM information_schema.table_constraints AS tc INNER JOIN information_schema.key_column_usage AS kc ON ( tc.table_name = kc.table_name AND tc.table_schema = kc.table_schema AND tc.constraint_name = kc.constraint_name) WHERE tc.constraint_type = %s AND tc.table_name = %s AND tc.table_schema = %s""" ctype = 'PRIMARY KEY' cursor = self.execute_sql(query, (ctype, table, schema or 'public')) return [pk for pk, in cursor.fetchall()] def get_foreign_keys(self, table, schema=None): sql = """ SELECT DISTINCT kcu.column_name, ccu.table_name, ccu.column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON 
(tc.constraint_name = kcu.constraint_name AND tc.constraint_schema = kcu.constraint_schema AND tc.table_name = kcu.table_name AND tc.table_schema = kcu.table_schema) JOIN information_schema.constraint_column_usage AS ccu ON (ccu.constraint_name = tc.constraint_name AND ccu.constraint_schema = tc.constraint_schema) WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = %s AND tc.table_schema = %s""" cursor = self.execute_sql(sql, (table, schema or 'public')) return [ForeignKeyMetadata(row[0], row[1], row[2], table) for row in cursor.fetchall()] def sequence_exists(self, sequence): res = self.execute_sql(""" SELECT COUNT(*) FROM pg_class, pg_namespace WHERE relkind='S' AND pg_class.relnamespace = pg_namespace.oid AND relname=%s""", (sequence,)) return bool(res.fetchone()[0]) def get_binary_type(self): return psycopg2.Binary def conflict_statement(self, on_conflict, query): return def conflict_update(self, oc, query): action = oc._action.lower() if oc._action else '' if action in ('ignore', 'nothing'): parts = [SQL('ON CONFLICT')] if oc._conflict_target: parts.append(EnclosedNodeList([ Entity(col) if isinstance(col, basestring) else col for col in oc._conflict_target])) parts.append(SQL('DO NOTHING')) return NodeList(parts) elif action and action != 'update': raise ValueError('The only supported actions for conflict ' 'resolution with Postgresql are "ignore" or ' '"update".') elif not oc._update and not oc._preserve: raise ValueError('If you are not performing any updates (or ' 'preserving any INSERTed values), then the ' 'conflict resolution action should be set to ' '"IGNORE".') elif not (oc._conflict_target or oc._conflict_constraint): raise ValueError('Postgres requires that a conflict target be ' 'specified when doing an upsert.') return self._build_on_conflict_update(oc, query) def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((date_part, SQL('FROM'), date_field))) def truncate_date(self, date_part, date_field): return fn.DATE_TRUNC(date_part, date_field) def to_timestamp(self, date_field): return self.extract_date('EPOCH', date_field) def from_timestamp(self, date_field): # Ironically, here, Postgres means "to the Postgresql timestamp type". 
        return fn.to_timestamp(date_field)

    def get_noop_select(self, ctx):
        return ctx.sql(Select().columns(SQL('0')).where(SQL('false')))

    def set_time_zone(self, timezone):
        self.execute_sql('set time zone "%s";' % timezone)


class MySQLDatabase(Database):
    field_types = {
        'AUTO': 'INTEGER AUTO_INCREMENT',
        'BIGAUTO': 'BIGINT AUTO_INCREMENT',
        'BOOL': 'BOOL',
        'DECIMAL': 'NUMERIC',
        'DOUBLE': 'DOUBLE PRECISION',
        'FLOAT': 'FLOAT',
        'UUID': 'VARCHAR(40)',
        'UUIDB': 'VARBINARY(16)'}
    operations = {
        'LIKE': 'LIKE BINARY',
        'ILIKE': 'LIKE',
        'REGEXP': 'REGEXP BINARY',
        'IREGEXP': 'REGEXP',
        'XOR': 'XOR'}
    param = '%s'
    quote = '``'

    commit_select = True
    compound_select_parentheses = CSQ_PARENTHESES_UNNESTED
    for_update = True
    index_using_precedes_table = True
    limit_max = 2 ** 64 - 1
    safe_create_index = False
    safe_drop_index = False
    sql_mode = 'PIPES_AS_CONCAT'

    def init(self, database, **kwargs):
        params = {
            'charset': 'utf8',
            'sql_mode': self.sql_mode,
            'use_unicode': True}
        params.update(kwargs)
        if 'password' in params and mysql_passwd:
            params['passwd'] = params.pop('password')
        super(MySQLDatabase, self).init(database, **params)

    def _connect(self):
        if mysql is None:
            raise ImproperlyConfigured('MySQL driver not installed!')
        conn = mysql.connect(db=self.database, **self.connect_params)
        return conn

    def _set_server_version(self, conn):
        try:
            version_raw = conn.server_version
        except AttributeError:
            version_raw = conn.get_server_info()
        self.server_version = self._extract_server_version(version_raw)

    def _extract_server_version(self, version):
        version = version.lower()
        if 'maria' in version:
            match_obj = re.search(r'(1\d\.\d+\.\d+)', version)
        else:
            match_obj = re.search(r'(\d\.\d+\.\d+)', version)
        if match_obj is not None:
            return tuple(int(num) for num in match_obj.groups()[0].split('.'))

        warnings.warn('Unable to determine MySQL version: "%s"' % version)
        return (0, 0, 0)  # Unable to determine version!
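
    # Illustrative sketch, not part of the library: a guess at how the
    # _extract_server_version() helper above normalizes a couple of typical
    # driver-reported version strings. Both the version strings and the
    # 'my_app' database name are hypothetical examples.
    #
    #   >>> db = MySQLDatabase('my_app')
    #   >>> db._extract_server_version('5.7.17-log')
    #   (5, 7, 17)
    #   >>> db._extract_server_version('10.4.8-MariaDB-1:10.4.8+maria~bionic')
    #   (10, 4, 8)
    #
    # MariaDB advertises a 10.x version plus a vendor suffix, which is why the
    # 'maria' branch matches with the r'(1\d\.\d+\.\d+)' pattern.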
    def default_values_insert(self, ctx):
        return ctx.literal('() VALUES ()')

    def get_tables(self, schema=None):
        query = ('SELECT table_name FROM information_schema.tables '
                 'WHERE table_schema = DATABASE() AND table_type != %s '
                 'ORDER BY table_name')
        return [table for table, in self.execute_sql(query, ('VIEW',))]

    def get_views(self, schema=None):
        query = ('SELECT table_name, view_definition '
                 'FROM information_schema.views '
                 'WHERE table_schema = DATABASE() ORDER BY table_name')
        cursor = self.execute_sql(query)
        return [ViewMetadata(*row) for row in cursor.fetchall()]

    def get_indexes(self, table, schema=None):
        cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
        unique = set()
        indexes = {}
        for row in cursor.fetchall():
            if not row[1]:
                unique.add(row[2])
            indexes.setdefault(row[2], [])
            indexes[row[2]].append(row[4])
        return [IndexMetadata(name, None, indexes[name], name in unique, table)
                for name in indexes]

    def get_columns(self, table, schema=None):
        sql = """
            SELECT column_name, is_nullable, data_type, column_default
            FROM information_schema.columns
            WHERE table_name = %s AND table_schema = DATABASE()"""
        cursor = self.execute_sql(sql, (table,))
        pks = set(self.get_primary_keys(table))
        return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
                for name, null, dt, df in cursor.fetchall()]

    def get_primary_keys(self, table, schema=None):
        cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
        return [row[4] for row in
                filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())]

    def get_foreign_keys(self, table, schema=None):
        query = """
            SELECT column_name, referenced_table_name, referenced_column_name
            FROM information_schema.key_column_usage
            WHERE table_name = %s
                AND table_schema = DATABASE()
                AND referenced_table_name IS NOT NULL
                AND referenced_column_name IS NOT NULL"""
        cursor = self.execute_sql(query, (table,))
        return [
            ForeignKeyMetadata(column, dest_table, dest_column, table)
            for column, dest_table, dest_column in cursor.fetchall()]

    def get_binary_type(self):
        return mysql.Binary

    def conflict_statement(self, on_conflict, query):
        if not on_conflict._action:
            return

        action = on_conflict._action.lower()
        if action == 'replace':
            return SQL('REPLACE')
        elif action == 'ignore':
            return SQL('INSERT IGNORE')
        elif action != 'update':
            raise ValueError('Un-supported action for conflict resolution. '
                             'MySQL supports REPLACE, IGNORE and UPDATE.')

    def conflict_update(self, on_conflict, query):
        if on_conflict._where or on_conflict._conflict_target or \
           on_conflict._conflict_constraint:
            raise ValueError('MySQL does not support the specification of '
                             'where clauses or conflict targets for conflict '
                             'resolution.')

        updates = []
        if on_conflict._preserve:
            # Here we need to determine which function to use, which varies
            # depending on the MySQL server version. MySQL and MariaDB prior
            # to 10.3.3 use "VALUES", while MariaDB 10.3.3+ use "VALUE".
            version = self.server_version or (0,)
            if version[0] == 10 and version >= (10, 3, 3):
                VALUE_FN = fn.VALUE
            else:
                VALUE_FN = fn.VALUES

            for column in on_conflict._preserve:
                entity = ensure_entity(column)
                expression = NodeList((
                    ensure_entity(column),
                    SQL('='),
                    VALUE_FN(entity)))
                updates.append(expression)

        if on_conflict._update:
            for k, v in on_conflict._update.items():
                if not isinstance(v, Node):
                    # Attempt to resolve string field-names to their
                    # respective field object, to apply data-type conversions.
if isinstance(k, basestring): k = getattr(query.table, k) if isinstance(k, Field): v = k.to_value(v) else: v = Value(v, unpack=False) updates.append(NodeList((ensure_entity(k), SQL('='), v))) if updates: return NodeList((SQL('ON DUPLICATE KEY UPDATE'), CommaNodeList(updates))) def extract_date(self, date_part, date_field): return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field))) def truncate_date(self, date_part, date_field): return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part], python_value=simple_date_time) def to_timestamp(self, date_field): return fn.UNIX_TIMESTAMP(date_field) def from_timestamp(self, date_field): return fn.FROM_UNIXTIME(date_field) def random(self): return fn.rand() def get_noop_select(self, ctx): return ctx.literal('DO 0') # TRANSACTION CONTROL. class _manual(_callable_context_manager): def __init__(self, db): self.db = db def __enter__(self): top = self.db.top_transaction() if top is not None and not isinstance(top, _manual): raise ValueError('Cannot enter manual commit block while a ' 'transaction is active.') self.db.push_transaction(self) def __exit__(self, exc_type, exc_val, exc_tb): if self.db.pop_transaction() is not self: raise ValueError('Transaction stack corrupted while exiting ' 'manual commit block.') class _atomic(_callable_context_manager): def __init__(self, db, *args, **kwargs): self.db = db self._transaction_args = (args, kwargs) def __enter__(self): if self.db.transaction_depth() == 0: args, kwargs = self._transaction_args self._helper = self.db.transaction(*args, **kwargs) elif isinstance(self.db.top_transaction(), _manual): raise ValueError('Cannot enter atomic commit block while in ' 'manual commit mode.') else: self._helper = self.db.savepoint() return self._helper.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): return self._helper.__exit__(exc_type, exc_val, exc_tb) class _transaction(_callable_context_manager): def __init__(self, db, *args, **kwargs): self.db = db self._begin_args = (args, kwargs) def _begin(self): args, kwargs = self._begin_args self.db.begin(*args, **kwargs) def commit(self, begin=True): self.db.commit() if begin: self._begin() def rollback(self, begin=True): self.db.rollback() if begin: self._begin() def __enter__(self): if self.db.transaction_depth() == 0: self._begin() self.db.push_transaction(self) return self def __exit__(self, exc_type, exc_val, exc_tb): try: if exc_type: self.rollback(False) elif self.db.transaction_depth() == 1: try: self.commit(False) except: self.rollback(False) raise finally: self.db.pop_transaction() class _savepoint(_callable_context_manager): def __init__(self, db, sid=None): self.db = db self.sid = sid or 's' + uuid.uuid4().hex self.quoted_sid = self.sid.join(self.db.quote) def _begin(self): self.db.execute_sql('SAVEPOINT %s;' % self.quoted_sid) def commit(self, begin=True): self.db.execute_sql('RELEASE SAVEPOINT %s;' % self.quoted_sid) if begin: self._begin() def rollback(self): self.db.execute_sql('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid) def __enter__(self): self._begin() return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type: self.rollback() else: try: self.commit(begin=False) except: self.rollback() raise # CURSOR REPRESENTATIONS. 
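
# Illustrative sketch, not part of the library: rough usage of the
# transaction helpers defined above (atomic / manual-commit / savepoint).
# The 'app.db' filename and the 'account' and 'entry' model instances are
# hypothetical placeholders.
#
#   db = SqliteDatabase('app.db')
#
#   with db.atomic():            # outermost block begins a transaction
#       account.save()
#       with db.atomic():        # nested block falls back to a savepoint
#           entry.save()
#
#   with db.manual_commit():     # opt out of automatic transaction wrapping
#       db.begin()
#       try:
#           account.save()
#       except Exception:
#           db.rollback()
#       else:
#           db.commit()
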
class CursorWrapper(object):
    def __init__(self, cursor):
        self.cursor = cursor
        self.count = 0
        self.index = 0
        self.initialized = False
        self.populated = False
        self.row_cache = []

    def __iter__(self):
        if self.populated:
            return iter(self.row_cache)
        return ResultIterator(self)

    def __getitem__(self, item):
        if isinstance(item, slice):
            stop = item.stop
            if stop is None or stop < 0:
                self.fill_cache()
            else:
                self.fill_cache(stop)
            return self.row_cache[item]
        elif isinstance(item, int):
            self.fill_cache(item if item > 0 else 0)
            return self.row_cache[item]
        else:
            raise ValueError('CursorWrapper only supports integer and slice '
                             'indexes.')

    def __len__(self):
        self.fill_cache()
        return self.count

    def initialize(self):
        pass

    def iterate(self, cache=True):
        row = self.cursor.fetchone()
        if row is None:
            self.populated = True
            self.cursor.close()
            raise StopIteration
        elif not self.initialized:
            self.initialize()  # Lazy initialization.
            self.initialized = True

        self.count += 1
        result = self.process_row(row)
        if cache:
            self.row_cache.append(result)
        return result

    def process_row(self, row):
        return row

    def iterator(self):
        """Efficient one-pass iteration over the result set."""
        while True:
            try:
                yield self.iterate(False)
            except StopIteration:
                return

    def fill_cache(self, n=0):
        n = n or float('Inf')
        if n < 0:
            raise ValueError('Negative values are not supported.')

        iterator = ResultIterator(self)
        iterator.index = self.count
        while not self.populated and (n > self.count):
            try:
                iterator.next()
            except StopIteration:
                break


class DictCursorWrapper(CursorWrapper):
    def _initialize_columns(self):
        description = self.cursor.description
        self.columns = [t[0][t[0].find('.') + 1:].strip('")')
                        for t in description]
        self.ncols = len(description)

    initialize = _initialize_columns

    def _row_to_dict(self, row):
        result = {}
        for i in range(self.ncols):
            result.setdefault(self.columns[i], row[i])  # Do not overwrite.
return result process_row = _row_to_dict class NamedTupleCursorWrapper(CursorWrapper): def initialize(self): description = self.cursor.description self.tuple_class = collections.namedtuple( 'Row', [col[0][col[0].find('.') + 1:].strip('"') for col in description]) def process_row(self, row): return self.tuple_class(*row) class ObjectCursorWrapper(DictCursorWrapper): def __init__(self, cursor, constructor): super(ObjectCursorWrapper, self).__init__(cursor) self.constructor = constructor def process_row(self, row): row_dict = self._row_to_dict(row) return self.constructor(**row_dict) class ResultIterator(object): def __init__(self, cursor_wrapper): self.cursor_wrapper = cursor_wrapper self.index = 0 def __iter__(self): return self def next(self): if self.index < self.cursor_wrapper.count: obj = self.cursor_wrapper.row_cache[self.index] elif not self.cursor_wrapper.populated: self.cursor_wrapper.iterate() obj = self.cursor_wrapper.row_cache[self.index] else: raise StopIteration self.index += 1 return obj __next__ = next # FIELDS class FieldAccessor(object): def __init__(self, model, field, name): self.model = model self.field = field self.name = name def __get__(self, instance, instance_type=None): if instance is not None: return instance.__data__.get(self.name) return self.field def __set__(self, instance, value): instance.__data__[self.name] = value instance._dirty.add(self.name) class ForeignKeyAccessor(FieldAccessor): def __init__(self, model, field, name): super(ForeignKeyAccessor, self).__init__(model, field, name) self.rel_model = field.rel_model def get_rel_instance(self, instance): value = instance.__data__.get(self.name) if value is not None or self.name in instance.__rel__: if self.name not in instance.__rel__ and self.field.lazy_load: obj = self.rel_model.get(self.field.rel_field == value) instance.__rel__[self.name] = obj return instance.__rel__.get(self.name, value) elif not self.field.null and self.field.lazy_load: raise self.rel_model.DoesNotExist return value def __get__(self, instance, instance_type=None): if instance is not None: return self.get_rel_instance(instance) return self.field def __set__(self, instance, obj): if isinstance(obj, self.rel_model): instance.__data__[self.name] = getattr(obj, self.field.rel_field.name) instance.__rel__[self.name] = obj else: fk_value = instance.__data__.get(self.name) instance.__data__[self.name] = obj if (obj != fk_value or obj is None) and \ self.name in instance.__rel__: del instance.__rel__[self.name] instance._dirty.add(self.name) class BackrefAccessor(object): def __init__(self, field): self.field = field self.model = field.rel_model self.rel_model = field.model def __get__(self, instance, instance_type=None): if instance is not None: dest = self.field.rel_field.name return (self.rel_model .select() .where(self.field == getattr(instance, dest))) return self class ObjectIdAccessor(object): """Gives direct access to the underlying id""" def __init__(self, field): self.field = field def __get__(self, instance, instance_type=None): if instance is not None: value = instance.__data__.get(self.field.name) # Pull the object-id from the related object if it is not set. 
if value is None and self.field.name in instance.__rel__: rel_obj = instance.__rel__[self.field.name] value = getattr(rel_obj, self.field.rel_field.name) return value return self.field def __set__(self, instance, value): setattr(instance, self.field.name, value) class Field(ColumnBase): _field_counter = 0 _order = 0 accessor_class = FieldAccessor auto_increment = False default_index_type = None field_type = 'DEFAULT' unpack = True def __init__(self, null=False, index=False, unique=False, column_name=None, default=None, primary_key=False, constraints=None, sequence=None, collation=None, unindexed=False, choices=None, help_text=None, verbose_name=None, index_type=None, db_column=None, _hidden=False): if db_column is not None: __deprecated__('"db_column" has been deprecated in favor of ' '"column_name" for Field objects.') column_name = db_column self.null = null self.index = index self.unique = unique self.column_name = column_name self.default = default self.primary_key = primary_key self.constraints = constraints # List of column constraints. self.sequence = sequence # Name of sequence, e.g. foo_id_seq. self.collation = collation self.unindexed = unindexed self.choices = choices self.help_text = help_text self.verbose_name = verbose_name self.index_type = index_type or self.default_index_type self._hidden = _hidden # Used internally for recovering the order in which Fields were defined # on the Model class. Field._field_counter += 1 self._order = Field._field_counter self._sort_key = (self.primary_key and 1 or 2), self._order def __hash__(self): return hash(self.name + '.' + self.model.__name__) def __repr__(self): if hasattr(self, 'model') and getattr(self, 'name', None): return '<%s: %s.%s>' % (type(self).__name__, self.model.__name__, self.name) return '<%s: (unbound)>' % type(self).__name__ def bind(self, model, name, set_attribute=True): self.model = model self.name = self.safe_name = name self.column_name = self.column_name or name if set_attribute: setattr(model, name, self.accessor_class(model, self, name)) @property def column(self): return Column(self.model._meta.table, self.column_name) def adapt(self, value): return value def db_value(self, value): return value if value is None else self.adapt(value) def python_value(self, value): return value if value is None else self.adapt(value) def to_value(self, value): return Value(value, self.db_value, unpack=False) def get_sort_key(self, ctx): return self._sort_key def __sql__(self, ctx): return ctx.sql(self.column) def get_modifiers(self): pass def ddl_datatype(self, ctx): if ctx and ctx.state.field_types: column_type = ctx.state.field_types.get(self.field_type, self.field_type) else: column_type = self.field_type modifiers = self.get_modifiers() if column_type and modifiers: modifier_literal = ', '.join([str(m) for m in modifiers]) return SQL('%s(%s)' % (column_type, modifier_literal)) else: return SQL(column_type) def ddl(self, ctx): accum = [Entity(self.column_name)] data_type = self.ddl_datatype(ctx) if data_type: accum.append(data_type) if self.unindexed: accum.append(SQL('UNINDEXED')) if not self.null: accum.append(SQL('NOT NULL')) if self.primary_key: accum.append(SQL('PRIMARY KEY')) if self.sequence: accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence)) if self.constraints: accum.extend(self.constraints) if self.collation: accum.append(SQL('COLLATE %s' % self.collation)) return NodeList(accum) class IntegerField(Field): field_type = 'INT' def adapt(self, value): try: return int(value) except ValueError: return value class 
BigIntegerField(IntegerField): field_type = 'BIGINT' class SmallIntegerField(IntegerField): field_type = 'SMALLINT' class AutoField(IntegerField): auto_increment = True field_type = 'AUTO' def __init__(self, *args, **kwargs): if kwargs.get('primary_key') is False: raise ValueError('%s must always be a primary key.' % type(self)) kwargs['primary_key'] = True super(AutoField, self).__init__(*args, **kwargs) class BigAutoField(AutoField): field_type = 'BIGAUTO' class IdentityField(AutoField): field_type = 'INT GENERATED BY DEFAULT AS IDENTITY' def __init__(self, generate_always=False, **kwargs): if generate_always: self.field_type = 'INT GENERATED ALWAYS AS IDENTITY' super(IdentityField, self).__init__(**kwargs) class PrimaryKeyField(AutoField): def __init__(self, *args, **kwargs): __deprecated__('"PrimaryKeyField" has been renamed to "AutoField". ' 'Please update your code accordingly as this will be ' 'completely removed in a subsequent release.') super(PrimaryKeyField, self).__init__(*args, **kwargs) class FloatField(Field): field_type = 'FLOAT' def adapt(self, value): try: return float(value) except ValueError: return value class DoubleField(FloatField): field_type = 'DOUBLE' class DecimalField(Field): field_type = 'DECIMAL' def __init__(self, max_digits=10, decimal_places=5, auto_round=False, rounding=None, *args, **kwargs): self.max_digits = max_digits self.decimal_places = decimal_places self.auto_round = auto_round self.rounding = rounding or decimal.DefaultContext.rounding self._exp = decimal.Decimal(10) ** (-self.decimal_places) super(DecimalField, self).__init__(*args, **kwargs) def get_modifiers(self): return [self.max_digits, self.decimal_places] def db_value(self, value): D = decimal.Decimal if not value: return value if value is None else D(0) if self.auto_round: decimal_value = D(text_type(value)) return decimal_value.quantize(self._exp, rounding=self.rounding) return value def python_value(self, value): if value is not None: if isinstance(value, decimal.Decimal): return value return decimal.Decimal(text_type(value)) class _StringField(Field): def adapt(self, value): if isinstance(value, text_type): return value elif isinstance(value, bytes_type): return value.decode('utf-8') return text_type(value) def __add__(self, other): return StringExpression(self, OP.CONCAT, other) def __radd__(self, other): return StringExpression(other, OP.CONCAT, self) class CharField(_StringField): field_type = 'VARCHAR' def __init__(self, max_length=255, *args, **kwargs): self.max_length = max_length super(CharField, self).__init__(*args, **kwargs) def get_modifiers(self): return self.max_length and [self.max_length] or None class FixedCharField(CharField): field_type = 'CHAR' def python_value(self, value): value = super(FixedCharField, self).python_value(value) if value: value = value.strip() return value class TextField(_StringField): field_type = 'TEXT' class BlobField(Field): field_type = 'BLOB' def _db_hook(self, database): if database is None: self._constructor = bytearray else: self._constructor = database.get_binary_type() def bind(self, model, name, set_attribute=True): self._constructor = bytearray if model._meta.database: if isinstance(model._meta.database, Proxy): model._meta.database.attach_callback(self._db_hook) else: self._db_hook(model._meta.database) # Attach a hook to the model metadata; in the event the database is # changed or set at run-time, we will be sure to apply our callback and # use the proper data-type for our database driver. 
model._meta._db_hooks.append(self._db_hook) return super(BlobField, self).bind(model, name, set_attribute) def db_value(self, value): if isinstance(value, text_type): value = value.encode('raw_unicode_escape') if isinstance(value, bytes_type): return self._constructor(value) return value class BitField(BitwiseMixin, BigIntegerField): def __init__(self, *args, **kwargs): kwargs.setdefault('default', 0) super(BitField, self).__init__(*args, **kwargs) self.__current_flag = 1 def flag(self, value=None): if value is None: value = self.__current_flag self.__current_flag <<= 1 else: self.__current_flag = value << 1 class FlagDescriptor(ColumnBase): def __init__(self, field, value): self._field = field self._value = value super(FlagDescriptor, self).__init__() def clear(self): return self._field.bin_and(~self._value) def set(self): return self._field.bin_or(self._value) def __get__(self, instance, instance_type=None): if instance is None: return self value = getattr(instance, self._field.name) or 0 return (value & self._value) != 0 def __set__(self, instance, is_set): if is_set not in (True, False): raise ValueError('Value must be either True or False') value = getattr(instance, self._field.name) or 0 if is_set: value |= self._value else: value &= ~self._value setattr(instance, self._field.name, value) def __sql__(self, ctx): return ctx.sql(self._field.bin_and(self._value) != 0) return FlagDescriptor(self, value) class BigBitFieldData(object): def __init__(self, instance, name): self.instance = instance self.name = name value = self.instance.__data__.get(self.name) if not value: value = bytearray() elif not isinstance(value, bytearray): value = bytearray(value) self._buffer = self.instance.__data__[self.name] = value def _ensure_length(self, idx): byte_num, byte_offset = divmod(idx, 8) cur_size = len(self._buffer) if cur_size <= byte_num: self._buffer.extend(b'\x00' * ((byte_num + 1) - cur_size)) return byte_num, byte_offset def set_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] |= (1 << byte_offset) def clear_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] &= ~(1 << byte_offset) def toggle_bit(self, idx): byte_num, byte_offset = self._ensure_length(idx) self._buffer[byte_num] ^= (1 << byte_offset) return bool(self._buffer[byte_num] & (1 << byte_offset)) def is_set(self, idx): byte_num, byte_offset = self._ensure_length(idx) return bool(self._buffer[byte_num] & (1 << byte_offset)) def __repr__(self): return repr(self._buffer) class BigBitFieldAccessor(FieldAccessor): def __get__(self, instance, instance_type=None): if instance is None: return self.field return BigBitFieldData(instance, self.name) def __set__(self, instance, value): if isinstance(value, memoryview): value = value.tobytes() elif isinstance(value, buffer_type): value = bytes(value) elif isinstance(value, bytearray): value = bytes_type(value) elif isinstance(value, BigBitFieldData): value = bytes_type(value._buffer) elif isinstance(value, text_type): value = value.encode('utf-8') elif not isinstance(value, bytes_type): raise ValueError('Value must be either a bytes, memoryview or ' 'BigBitFieldData instance.') super(BigBitFieldAccessor, self).__set__(instance, value) class BigBitField(BlobField): accessor_class = BigBitFieldAccessor def __init__(self, *args, **kwargs): kwargs.setdefault('default', bytes_type) super(BigBitField, self).__init__(*args, **kwargs) def db_value(self, value): return bytes_type(value) if value is not None else value class 
UUIDField(Field): field_type = 'UUID' def db_value(self, value): if isinstance(value, basestring) and len(value) == 32: # Hex string. No transformation is necessary. return value elif isinstance(value, bytes) and len(value) == 16: # Allow raw binary representation. value = uuid.UUID(bytes=value) if isinstance(value, uuid.UUID): return value.hex try: return uuid.UUID(value).hex except: return value def python_value(self, value): if isinstance(value, uuid.UUID): return value return uuid.UUID(value) if value is not None else None class BinaryUUIDField(BlobField): field_type = 'UUIDB' def db_value(self, value): if isinstance(value, bytes) and len(value) == 16: # Raw binary value. No transformation is necessary. return self._constructor(value) elif isinstance(value, basestring) and len(value) == 32: # Allow hex string representation. value = uuid.UUID(hex=value) if isinstance(value, uuid.UUID): return self._constructor(value.bytes) elif value is not None: raise ValueError('value for binary UUID field must be UUID(), ' 'a hexadecimal string, or a bytes object.') def python_value(self, value): if isinstance(value, uuid.UUID): return value elif isinstance(value, memoryview): value = value.tobytes() elif value and not isinstance(value, bytes): value = bytes(value) return uuid.UUID(bytes=value) if value is not None else None def _date_part(date_part): def dec(self): return self.model._meta.database.extract_date(date_part, self) return dec def format_date_time(value, formats, post_process=None): post_process = post_process or (lambda x: x) for fmt in formats: try: return post_process(datetime.datetime.strptime(value, fmt)) except ValueError: pass return value def simple_date_time(value): try: return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S') except (TypeError, ValueError): return value class _BaseFormattedField(Field): formats = None def __init__(self, formats=None, *args, **kwargs): if formats is not None: self.formats = formats super(_BaseFormattedField, self).__init__(*args, **kwargs) class DateTimeField(_BaseFormattedField): field_type = 'DATETIME' formats = [ '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d', ] def adapt(self, value): if value and isinstance(value, basestring): return format_date_time(value, self.formats) return value def to_timestamp(self): return self.model._meta.database.to_timestamp(self) def truncate(self, part): return self.model._meta.database.truncate_date(part, self) year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) class DateField(_BaseFormattedField): field_type = 'DATE' formats = [ '%Y-%m-%d', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', ] def adapt(self, value): if value and isinstance(value, basestring): pp = lambda x: x.date() return format_date_time(value, self.formats, pp) elif value and isinstance(value, datetime.datetime): return value.date() return value def to_timestamp(self): return self.model._meta.database.to_timestamp(self) def truncate(self, part): return self.model._meta.database.truncate_date(part, self) year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) class TimeField(_BaseFormattedField): field_type = 'TIME' formats = [ '%H:%M:%S.%f', '%H:%M:%S', '%H:%M', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', ] def adapt(self, value): if value: if isinstance(value, basestring): pp = lambda x: x.time() return 
format_date_time(value, self.formats, pp) elif isinstance(value, datetime.datetime): return value.time() if value is not None and isinstance(value, datetime.timedelta): return (datetime.datetime.min + value).time() return value hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) def _timestamp_date_part(date_part): def dec(self): db = self.model._meta.database expr = ((self / Value(self.resolution, converter=False)) if self.resolution > 1 else self) return db.extract_date(date_part, db.from_timestamp(expr)) return dec class TimestampField(BigIntegerField): # Support second -> microsecond resolution. valid_resolutions = [10**i for i in range(7)] def __init__(self, *args, **kwargs): self.resolution = kwargs.pop('resolution', None) if not self.resolution: self.resolution = 1 elif self.resolution in range(2, 7): self.resolution = 10 ** self.resolution elif self.resolution not in self.valid_resolutions: raise ValueError('TimestampField resolution must be one of: %s' % ', '.join(str(i) for i in self.valid_resolutions)) self.ticks_to_microsecond = 1000000 // self.resolution self.utc = kwargs.pop('utc', False) or False dflt = datetime.datetime.utcnow if self.utc else datetime.datetime.now kwargs.setdefault('default', dflt) super(TimestampField, self).__init__(*args, **kwargs) def local_to_utc(self, dt): # Convert naive local datetime into naive UTC, e.g.: # 2019-03-01T12:00:00 (local=US/Central) -> 2019-03-01T18:00:00. # 2019-05-01T12:00:00 (local=US/Central) -> 2019-05-01T17:00:00. # 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00. return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6]) def utc_to_local(self, dt): # Convert a naive UTC datetime into local time, e.g.: # 2019-03-01T18:00:00 (local=US/Central) -> 2019-03-01T12:00:00. # 2019-05-01T17:00:00 (local=US/Central) -> 2019-05-01T12:00:00. # 2019-03-01T12:00:00 (local=UTC) -> 2019-03-01T12:00:00. ts = calendar.timegm(dt.utctimetuple()) return datetime.datetime.fromtimestamp(ts) def get_timestamp(self, value): if self.utc: # If utc-mode is on, then we assume all naive datetimes are in UTC. 
return calendar.timegm(value.utctimetuple()) else: return time.mktime(value.timetuple()) def db_value(self, value): if value is None: return if isinstance(value, datetime.datetime): pass elif isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) else: return int(round(value * self.resolution)) timestamp = self.get_timestamp(value) if self.resolution > 1: timestamp += (value.microsecond * .000001) timestamp *= self.resolution return int(round(timestamp)) def python_value(self, value): if value is not None and isinstance(value, (int, float, long)): if self.resolution > 1: value, ticks = divmod(value, self.resolution) microseconds = int(ticks * self.ticks_to_microsecond) else: microseconds = 0 if self.utc: value = datetime.datetime.utcfromtimestamp(value) else: value = datetime.datetime.fromtimestamp(value) if microseconds: value = value.replace(microsecond=microseconds) return value def from_timestamp(self): expr = ((self / Value(self.resolution, converter=False)) if self.resolution > 1 else self) return self.model._meta.database.from_timestamp(expr) year = property(_timestamp_date_part('year')) month = property(_timestamp_date_part('month')) day = property(_timestamp_date_part('day')) hour = property(_timestamp_date_part('hour')) minute = property(_timestamp_date_part('minute')) second = property(_timestamp_date_part('second')) class IPField(BigIntegerField): def db_value(self, val): if val is not None: return struct.unpack('!I', socket.inet_aton(val))[0] def python_value(self, val): if val is not None: return socket.inet_ntoa(struct.pack('!I', val)) class BooleanField(Field): field_type = 'BOOL' adapt = bool class BareField(Field): def __init__(self, adapt=None, *args, **kwargs): super(BareField, self).__init__(*args, **kwargs) if adapt is not None: self.adapt = adapt def ddl_datatype(self, ctx): return class ForeignKeyField(Field): accessor_class = ForeignKeyAccessor backref_accessor_class = BackrefAccessor def __init__(self, model, field=None, backref=None, on_delete=None, on_update=None, deferrable=None, _deferred=None, rel_model=None, to_field=None, object_id_name=None, lazy_load=True, constraint_name=None, related_name=None, *args, **kwargs): kwargs.setdefault('index', True) super(ForeignKeyField, self).__init__(*args, **kwargs) if rel_model is not None: __deprecated__('"rel_model" has been deprecated in favor of ' '"model" for ForeignKeyField objects.') model = rel_model if to_field is not None: __deprecated__('"to_field" has been deprecated in favor of ' '"field" for ForeignKeyField objects.') field = to_field if related_name is not None: __deprecated__('"related_name" has been deprecated in favor of ' '"backref" for Field objects.') backref = related_name self._is_self_reference = model == 'self' self.rel_model = model self.rel_field = field self.declared_backref = backref self.backref = None self.on_delete = on_delete self.on_update = on_update self.deferrable = deferrable self.deferred = _deferred self.object_id_name = object_id_name self.lazy_load = lazy_load self.constraint_name = constraint_name @property def field_type(self): if not isinstance(self.rel_field, AutoField): return self.rel_field.field_type elif isinstance(self.rel_field, BigAutoField): return BigIntegerField.field_type return IntegerField.field_type def get_modifiers(self): if not isinstance(self.rel_field, AutoField): return self.rel_field.get_modifiers() return super(ForeignKeyField, self).get_modifiers() def adapt(self, value): return self.rel_field.adapt(value) def 
db_value(self, value): if isinstance(value, self.rel_model): value = getattr(value, self.rel_field.name) return self.rel_field.db_value(value) def python_value(self, value): if isinstance(value, self.rel_model): return value return self.rel_field.python_value(value) def bind(self, model, name, set_attribute=True): if not self.column_name: self.column_name = name if name.endswith('_id') else name + '_id' if not self.object_id_name: self.object_id_name = self.column_name if self.object_id_name == name: self.object_id_name += '_id' elif self.object_id_name == name: raise ValueError('ForeignKeyField "%s"."%s" specifies an ' 'object_id_name that conflicts with its field ' 'name.' % (model._meta.name, name)) if self._is_self_reference: self.rel_model = model if isinstance(self.rel_field, basestring): self.rel_field = getattr(self.rel_model, self.rel_field) elif self.rel_field is None: self.rel_field = self.rel_model._meta.primary_key # Bind field before assigning backref, so field is bound when # calling declared_backref() (if callable). super(ForeignKeyField, self).bind(model, name, set_attribute) self.safe_name = self.object_id_name if callable_(self.declared_backref): self.backref = self.declared_backref(self) else: self.backref, self.declared_backref = self.declared_backref, None if not self.backref: self.backref = '%s_set' % model._meta.name if set_attribute: setattr(model, self.object_id_name, ObjectIdAccessor(self)) if self.backref not in '!+': setattr(self.rel_model, self.backref, self.backref_accessor_class(self)) def foreign_key_constraint(self): parts = [] if self.constraint_name: parts.extend((SQL('CONSTRAINT'), Entity(self.constraint_name))) parts.extend([ SQL('FOREIGN KEY'), EnclosedNodeList((self,)), SQL('REFERENCES'), self.rel_model, EnclosedNodeList((self.rel_field,))]) if self.on_delete: parts.append(SQL('ON DELETE %s' % self.on_delete)) if self.on_update: parts.append(SQL('ON UPDATE %s' % self.on_update)) if self.deferrable: parts.append(SQL('DEFERRABLE %s' % self.deferrable)) return NodeList(parts) def __getattr__(self, attr): if attr.startswith('__'): # Prevent recursion error when deep-copying. raise AttributeError('Cannot look-up non-existant "__" methods.') if attr in self.rel_model._meta.fields: return self.rel_model._meta.fields[attr] raise AttributeError('Foreign-key has no attribute %s, nor is it a ' 'valid field on the related model.' % attr) class DeferredForeignKey(Field): _unresolved = set() def __init__(self, rel_model_name, **kwargs): self.field_kwargs = kwargs self.rel_model_name = rel_model_name.lower() DeferredForeignKey._unresolved.add(self) super(DeferredForeignKey, self).__init__( column_name=kwargs.get('column_name'), null=kwargs.get('null'), primary_key=kwargs.get('primary_key')) __hash__ = object.__hash__ def __deepcopy__(self, memo=None): return DeferredForeignKey(self.rel_model_name, **self.field_kwargs) def set_model(self, rel_model): field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs) if field.primary_key: # NOTE: this calls add_field() under-the-hood. 
self.model._meta.set_primary_key(self.name, field) else: self.model._meta.add_field(self.name, field) @staticmethod def resolve(model_cls): unresolved = sorted(DeferredForeignKey._unresolved, key=operator.attrgetter('_order')) for dr in unresolved: if dr.rel_model_name == model_cls.__name__.lower(): dr.set_model(model_cls) DeferredForeignKey._unresolved.discard(dr) class DeferredThroughModel(object): def __init__(self): self._refs = [] def set_field(self, model, field, name): self._refs.append((model, field, name)) def set_model(self, through_model): for src_model, m2mfield, name in self._refs: m2mfield.through_model = through_model src_model._meta.add_field(name, m2mfield) class MetaField(Field): column_name = default = model = name = None primary_key = False class ManyToManyFieldAccessor(FieldAccessor): def __init__(self, model, field, name): super(ManyToManyFieldAccessor, self).__init__(model, field, name) self.model = field.model self.rel_model = field.rel_model self.through_model = field.through_model src_fks = self.through_model._meta.model_refs[self.model] dest_fks = self.through_model._meta.model_refs[self.rel_model] if not src_fks: raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' % (self.model, self.through_model)) elif not dest_fks: raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' % (self.rel_model, self.through_model)) self.src_fk = src_fks[0] self.dest_fk = dest_fks[0] def __get__(self, instance, instance_type=None, force_query=False): if instance is not None: if not force_query and self.src_fk.backref != '+': backref = getattr(instance, self.src_fk.backref) if isinstance(backref, list): return [getattr(obj, self.dest_fk.name) for obj in backref] src_id = getattr(instance, self.src_fk.rel_field.name) return (ManyToManyQuery(instance, self, self.rel_model) .join(self.through_model) .join(self.model) .where(self.src_fk == src_id)) return self.field def __set__(self, instance, value): query = self.__get__(instance, force_query=True) query.add(value, clear_existing=True) class ManyToManyField(MetaField): accessor_class = ManyToManyFieldAccessor def __init__(self, model, backref=None, through_model=None, on_delete=None, on_update=None, _is_backref=False): if through_model is not None: if not (isinstance(through_model, DeferredThroughModel) or is_model(through_model)): raise TypeError('Unexpected value for through_model. 
Expected ' 'Model or DeferredThroughModel.') if not _is_backref and (on_delete is not None or on_update is not None): raise ValueError('Cannot specify on_delete or on_update when ' 'through_model is specified.') self.rel_model = model self.backref = backref self._through_model = through_model self._on_delete = on_delete self._on_update = on_update self._is_backref = _is_backref def _get_descriptor(self): return ManyToManyFieldAccessor(self) def bind(self, model, name, set_attribute=True): if isinstance(self._through_model, DeferredThroughModel): self._through_model.set_field(model, self, name) return super(ManyToManyField, self).bind(model, name, set_attribute) if not self._is_backref: many_to_many_field = ManyToManyField( self.model, backref=name, through_model=self.through_model, on_delete=self._on_delete, on_update=self._on_update, _is_backref=True) self.backref = self.backref or model._meta.name + 's' self.rel_model._meta.add_field(self.backref, many_to_many_field) def get_models(self): return [model for _, model in sorted(( (self._is_backref, self.model), (not self._is_backref, self.rel_model)))] @property def through_model(self): if self._through_model is None: self._through_model = self._create_through_model() return self._through_model @through_model.setter def through_model(self, value): self._through_model = value def _create_through_model(self): lhs, rhs = self.get_models() tables = [model._meta.table_name for model in (lhs, rhs)] class Meta: database = self.model._meta.database schema = self.model._meta.schema table_name = '%s_%s_through' % tuple(tables) indexes = ( ((lhs._meta.name, rhs._meta.name), True),) params = {'on_delete': self._on_delete, 'on_update': self._on_update} attrs = { lhs._meta.name: ForeignKeyField(lhs, **params), rhs._meta.name: ForeignKeyField(rhs, **params), 'Meta': Meta} klass_name = '%s%sThrough' % (lhs.__name__, rhs.__name__) return type(klass_name, (Model,), attrs) def get_through_model(self): # XXX: Deprecated. Just use the "through_model" property. 
return self.through_model class VirtualField(MetaField): field_class = None def __init__(self, field_class=None, *args, **kwargs): Field = field_class if field_class is not None else self.field_class self.field_instance = Field() if Field is not None else None super(VirtualField, self).__init__(*args, **kwargs) def db_value(self, value): if self.field_instance is not None: return self.field_instance.db_value(value) return value def python_value(self, value): if self.field_instance is not None: return self.field_instance.python_value(value) return value def bind(self, model, name, set_attribute=True): self.model = model self.column_name = self.name = self.safe_name = name setattr(model, name, self.accessor_class(model, self, name)) class CompositeKey(MetaField): sequence = None def __init__(self, *field_names): self.field_names = field_names self._safe_field_names = None @property def safe_field_names(self): if self._safe_field_names is None: if self.model is None: return self.field_names self._safe_field_names = [self.model._meta.fields[f].safe_name for f in self.field_names] return self._safe_field_names def __get__(self, instance, instance_type=None): if instance is not None: return tuple([getattr(instance, f) for f in self.safe_field_names]) return self def __set__(self, instance, value): if not isinstance(value, (list, tuple)): raise TypeError('A list or tuple must be used to set the value of ' 'a composite primary key.') if len(value) != len(self.field_names): raise ValueError('The length of the value must equal the number ' 'of columns of the composite primary key.') for idx, field_value in enumerate(value): setattr(instance, self.field_names[idx], field_value) def __eq__(self, other): expressions = [(self.model._meta.fields[field] == value) for field, value in zip(self.field_names, other)] return reduce(operator.and_, expressions) def __ne__(self, other): return ~(self == other) def __hash__(self): return hash((self.model.__name__, self.field_names)) def __sql__(self, ctx): # If the composite PK is being selected, do not use parens. Elsewhere, # such as in an expression, we want to use parentheses and treat it as # a row value. 
parens = ctx.scope != SCOPE_SOURCE return ctx.sql(NodeList([self.model._meta.fields[field] for field in self.field_names], ', ', parens)) def bind(self, model, name, set_attribute=True): self.model = model self.column_name = self.name = self.safe_name = name setattr(model, self.name, self) class _SortedFieldList(object): __slots__ = ('_keys', '_items') def __init__(self): self._keys = [] self._items = [] def __getitem__(self, i): return self._items[i] def __iter__(self): return iter(self._items) def __contains__(self, item): k = item._sort_key i = bisect_left(self._keys, k) j = bisect_right(self._keys, k) return item in self._items[i:j] def index(self, field): return self._keys.index(field._sort_key) def insert(self, item): k = item._sort_key i = bisect_left(self._keys, k) self._keys.insert(i, k) self._items.insert(i, item) def remove(self, item): idx = self.index(item) del self._items[idx] del self._keys[idx] # MODELS class SchemaManager(object): def __init__(self, model, database=None, **context_options): self.model = model self._database = database context_options.setdefault('scope', SCOPE_VALUES) self.context_options = context_options @property def database(self): db = self._database or self.model._meta.database if db is None: raise ImproperlyConfigured('database attribute does not appear to ' 'be set on the model: %s' % self.model) return db @database.setter def database(self, value): self._database = value def _create_context(self): return self.database.get_sql_context(**self.context_options) def _create_table(self, safe=True, **options): is_temp = options.pop('temporary', False) ctx = self._create_context() ctx.literal('CREATE TEMPORARY TABLE ' if is_temp else 'CREATE TABLE ') if safe: ctx.literal('IF NOT EXISTS ') ctx.sql(self.model).literal(' ') columns = [] constraints = [] meta = self.model._meta if meta.composite_key: pk_columns = [meta.fields[field_name].column for field_name in meta.primary_key.field_names] constraints.append(NodeList((SQL('PRIMARY KEY'), EnclosedNodeList(pk_columns)))) for field in meta.sorted_fields: columns.append(field.ddl(ctx)) if isinstance(field, ForeignKeyField) and not field.deferred: constraints.append(field.foreign_key_constraint()) if meta.constraints: constraints.extend(meta.constraints) constraints.extend(self._create_table_option_sql(options)) ctx.sql(EnclosedNodeList(columns + constraints)) if meta.table_settings is not None: table_settings = ensure_tuple(meta.table_settings) for setting in table_settings: if not isinstance(setting, basestring): raise ValueError('table_settings must be strings') ctx.literal(' ').literal(setting) if meta.without_rowid: ctx.literal(' WITHOUT ROWID') return ctx def _create_table_option_sql(self, options): accum = [] options = merge_dict(self.model._meta.options or {}, options) if not options: return accum for key, value in sorted(options.items()): if not isinstance(value, Node): if is_model(value): value = value._meta.table else: value = SQL(str(value)) accum.append(NodeList((SQL(key), value), glue='=')) return accum def create_table(self, safe=True, **options): self.database.execute(self._create_table(safe=safe, **options)) def _create_table_as(self, table_name, query, safe=True, **meta): ctx = (self._create_context() .literal('CREATE TEMPORARY TABLE ' if meta.get('temporary') else 'CREATE TABLE ')) if safe: ctx.literal('IF NOT EXISTS ') return (ctx .sql(Entity(table_name)) .literal(' AS ') .sql(query)) def create_table_as(self, table_name, query, safe=True, **meta): ctx = self._create_table_as(table_name, 
query, safe=safe, **meta) self.database.execute(ctx) def _drop_table(self, safe=True, **options): ctx = (self._create_context() .literal('DROP TABLE IF EXISTS ' if safe else 'DROP TABLE ') .sql(self.model)) if options.get('cascade'): ctx = ctx.literal(' CASCADE') elif options.get('restrict'): ctx = ctx.literal(' RESTRICT') return ctx def drop_table(self, safe=True, **options): self.database.execute(self._drop_table(safe=safe, **options)) def _truncate_table(self, restart_identity=False, cascade=False): db = self.database if not db.truncate_table: return (self._create_context() .literal('DELETE FROM ').sql(self.model)) ctx = self._create_context().literal('TRUNCATE TABLE ').sql(self.model) if restart_identity: ctx = ctx.literal(' RESTART IDENTITY') if cascade: ctx = ctx.literal(' CASCADE') return ctx def truncate_table(self, restart_identity=False, cascade=False): self.database.execute(self._truncate_table(restart_identity, cascade)) def _create_indexes(self, safe=True): return [self._create_index(index, safe) for index in self.model._meta.fields_to_index()] def _create_index(self, index, safe=True): if isinstance(index, Index): if not self.database.safe_create_index: index = index.safe(False) elif index._safe != safe: index = index.safe(safe) return self._create_context().sql(index) def create_indexes(self, safe=True): for query in self._create_indexes(safe=safe): self.database.execute(query) def _drop_indexes(self, safe=True): return [self._drop_index(index, safe) for index in self.model._meta.fields_to_index() if isinstance(index, Index)] def _drop_index(self, index, safe): statement = 'DROP INDEX ' if safe and self.database.safe_drop_index: statement += 'IF EXISTS ' if isinstance(index._table, Table) and index._table._schema: index_name = Entity(index._table._schema, index._name) else: index_name = Entity(index._name) return (self ._create_context() .literal(statement) .sql(index_name)) def drop_indexes(self, safe=True): for query in self._drop_indexes(safe=safe): self.database.execute(query) def _check_sequences(self, field): if not field.sequence or not self.database.sequences: raise ValueError('Sequences are either not supported, or are not ' 'defined for "%s".' 
% field.name) def _sequence_for_field(self, field): if field.model._meta.schema: return Entity(field.model._meta.schema, field.sequence) else: return Entity(field.sequence) def _create_sequence(self, field): self._check_sequences(field) if not self.database.sequence_exists(field.sequence): return (self ._create_context() .literal('CREATE SEQUENCE ') .sql(self._sequence_for_field(field))) def create_sequence(self, field): seq_ctx = self._create_sequence(field) if seq_ctx is not None: self.database.execute(seq_ctx) def _drop_sequence(self, field): self._check_sequences(field) if self.database.sequence_exists(field.sequence): return (self ._create_context() .literal('DROP SEQUENCE ') .sql(self._sequence_for_field(field))) def drop_sequence(self, field): seq_ctx = self._drop_sequence(field) if seq_ctx is not None: self.database.execute(seq_ctx) def _create_foreign_key(self, field): name = 'fk_%s_%s_refs_%s' % (field.model._meta.table_name, field.column_name, field.rel_model._meta.table_name) return (self ._create_context() .literal('ALTER TABLE ') .sql(field.model) .literal(' ADD CONSTRAINT ') .sql(Entity(_truncate_constraint_name(name))) .literal(' ') .sql(field.foreign_key_constraint())) def create_foreign_key(self, field): self.database.execute(self._create_foreign_key(field)) def create_sequences(self): if self.database.sequences: for field in self.model._meta.sorted_fields: if field.sequence: self.create_sequence(field) def create_all(self, safe=True, **table_options): self.create_sequences() self.create_table(safe, **table_options) self.create_indexes(safe=safe) def drop_sequences(self): if self.database.sequences: for field in self.model._meta.sorted_fields: if field.sequence: self.drop_sequence(field) def drop_all(self, safe=True, drop_sequences=True, **options): self.drop_table(safe, **options) if drop_sequences: self.drop_sequences() class Metadata(object): def __init__(self, model, database=None, table_name=None, indexes=None, primary_key=None, constraints=None, schema=None, only_save_dirty=False, depends_on=None, options=None, db_table=None, table_function=None, table_settings=None, without_rowid=False, temporary=False, legacy_table_names=True, **kwargs): if db_table is not None: __deprecated__('"db_table" has been deprecated in favor of ' '"table_name" for Models.') table_name = db_table self.model = model self.database = database self.fields = {} self.columns = {} self.combined = {} self._sorted_field_list = _SortedFieldList() self.sorted_fields = [] self.sorted_field_names = [] self.defaults = {} self._default_by_name = {} self._default_dict = {} self._default_callables = {} self._default_callable_list = [] self.name = model.__name__.lower() self.table_function = table_function self.legacy_table_names = legacy_table_names if not table_name: table_name = (self.table_function(model) if self.table_function else self.make_table_name()) self.table_name = table_name self._table = None self.indexes = list(indexes) if indexes else [] self.constraints = constraints self._schema = schema self.primary_key = primary_key self.composite_key = self.auto_increment = None self.only_save_dirty = only_save_dirty self.depends_on = depends_on self.table_settings = table_settings self.without_rowid = without_rowid self.temporary = temporary self.refs = {} self.backrefs = {} self.model_refs = collections.defaultdict(list) self.model_backrefs = collections.defaultdict(list) self.manytomany = {} self.options = options or {} for key, value in kwargs.items(): setattr(self, key, value) 
self._additional_keys = set(kwargs.keys()) # Allow objects to register hooks that are called if the model is bound # to a different database. For example, BlobField uses a different # Python data-type depending on the db driver / python version. When # the database changes, we need to update any BlobField so they can use # the appropriate data-type. self._db_hooks = [] def make_table_name(self): if self.legacy_table_names: return re.sub(r'[^\w]+', '_', self.name) return make_snake_case(self.model.__name__) def model_graph(self, refs=True, backrefs=True, depth_first=True): if not refs and not backrefs: raise ValueError('One of `refs` or `backrefs` must be True.') accum = [(None, self.model, None)] seen = set() queue = collections.deque((self,)) method = queue.pop if depth_first else queue.popleft while queue: curr = method() if curr in seen: continue seen.add(curr) if refs: for fk, model in curr.refs.items(): accum.append((fk, model, False)) queue.append(model._meta) if backrefs: for fk, model in curr.backrefs.items(): accum.append((fk, model, True)) queue.append(model._meta) return accum def add_ref(self, field): rel = field.rel_model self.refs[field] = rel self.model_refs[rel].append(field) rel._meta.backrefs[field] = self.model rel._meta.model_backrefs[self.model].append(field) def remove_ref(self, field): rel = field.rel_model del self.refs[field] self.model_refs[rel].remove(field) del rel._meta.backrefs[field] rel._meta.model_backrefs[self.model].remove(field) def add_manytomany(self, field): self.manytomany[field.name] = field def remove_manytomany(self, field): del self.manytomany[field.name] @property def table(self): if self._table is None: self._table = Table( self.table_name, [field.column_name for field in self.sorted_fields], schema=self.schema, _model=self.model, _database=self.database) return self._table @table.setter def table(self, value): raise AttributeError('Cannot set the "table".') @table.deleter def table(self): self._table = None @property def schema(self): return self._schema @schema.setter def schema(self, value): self._schema = value del self.table @property def entity(self): if self._schema: return Entity(self._schema, self.table_name) else: return Entity(self.table_name) def _update_sorted_fields(self): self.sorted_fields = list(self._sorted_field_list) self.sorted_field_names = [f.name for f in self.sorted_fields] def get_rel_for_model(self, model): if isinstance(model, ModelAlias): model = model.model forwardrefs = self.model_refs.get(model, []) backrefs = self.model_backrefs.get(model, []) return (forwardrefs, backrefs) def add_field(self, field_name, field, set_attribute=True): if field_name in self.fields: self.remove_field(field_name) elif field_name in self.manytomany: self.remove_manytomany(self.manytomany[field_name]) if not isinstance(field, MetaField): del self.table field.bind(self.model, field_name, set_attribute) self.fields[field.name] = field self.columns[field.column_name] = field self.combined[field.name] = field self.combined[field.column_name] = field self._sorted_field_list.insert(field) self._update_sorted_fields() if field.default is not None: # This optimization helps speed up model instance construction. 
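                # Illustrative note (hypothetical fields): a static default
                # such as ``IntegerField(default=0)`` lands in the plain
                # default maps, whereas a callable default such as
                # ``DateTimeField(default=datetime.datetime.now)`` is tracked
                # separately and invoked per instance by get_default_dict().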
self.defaults[field] = field.default if callable_(field.default): self._default_callables[field] = field.default self._default_callable_list.append((field.name, field.default)) else: self._default_dict[field] = field.default self._default_by_name[field.name] = field.default else: field.bind(self.model, field_name, set_attribute) if isinstance(field, ForeignKeyField): self.add_ref(field) elif isinstance(field, ManyToManyField) and field.name: self.add_manytomany(field) def remove_field(self, field_name): if field_name not in self.fields: return del self.table original = self.fields.pop(field_name) del self.columns[original.column_name] del self.combined[field_name] try: del self.combined[original.column_name] except KeyError: pass self._sorted_field_list.remove(original) self._update_sorted_fields() if original.default is not None: del self.defaults[original] if self._default_callables.pop(original, None): for i, (name, _) in enumerate(self._default_callable_list): if name == field_name: self._default_callable_list.pop(i) break else: self._default_dict.pop(original, None) self._default_by_name.pop(original.name, None) if isinstance(original, ForeignKeyField): self.remove_ref(original) def set_primary_key(self, name, field): self.composite_key = isinstance(field, CompositeKey) self.add_field(name, field) self.primary_key = field self.auto_increment = ( field.auto_increment or bool(field.sequence)) def get_primary_keys(self): if self.composite_key: return tuple([self.fields[field_name] for field_name in self.primary_key.field_names]) else: return (self.primary_key,) if self.primary_key is not False else () def get_default_dict(self): dd = self._default_by_name.copy() for field_name, default in self._default_callable_list: dd[field_name] = default() return dd def fields_to_index(self): indexes = [] for f in self.sorted_fields: if f.primary_key: continue if f.index or f.unique: indexes.append(ModelIndex(self.model, (f,), unique=f.unique, using=f.index_type)) for index_obj in self.indexes: if isinstance(index_obj, Node): indexes.append(index_obj) elif isinstance(index_obj, (list, tuple)): index_parts, unique = index_obj fields = [] for part in index_parts: if isinstance(part, basestring): fields.append(self.combined[part]) elif isinstance(part, Node): fields.append(part) else: raise ValueError('Expected either a field name or a ' 'subclass of Node. Got: %s' % part) indexes.append(ModelIndex(self.model, fields, unique=unique)) return indexes def set_database(self, database): self.database = database self.model._schema._database = database del self.table # Apply any hooks that have been registered. 
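        # Illustrative sketch (SomeModel and test_db are hypothetical): this
        # is the code path exercised when a model is re-bound, e.g.
        #
        #   test_db = SqliteDatabase(':memory:')
        #   with SomeModel.bind_ctx(test_db):
        #       SomeModel.create_table()
        #       ...  # queries inside the block run against test_db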
for hook in self._db_hooks: hook(database) def set_table_name(self, table_name): self.table_name = table_name del self.table class SubclassAwareMetadata(Metadata): models = [] def __init__(self, model, *args, **kwargs): super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs) self.models.append(model) def map_models(self, fn): for model in self.models: fn(model) class DoesNotExist(Exception): pass class ModelBase(type): inheritable = set(['constraints', 'database', 'indexes', 'primary_key', 'options', 'schema', 'table_function', 'temporary', 'only_save_dirty', 'legacy_table_names', 'table_settings']) def __new__(cls, name, bases, attrs): if name == MODEL_BASE or bases[0].__name__ == MODEL_BASE: return super(ModelBase, cls).__new__(cls, name, bases, attrs) meta_options = {} meta = attrs.pop('Meta', None) if meta: for k, v in meta.__dict__.items(): if not k.startswith('_'): meta_options[k] = v pk = getattr(meta, 'primary_key', None) pk_name = parent_pk = None # Inherit any field descriptors by deep copying the underlying field # into the attrs of the new model, additionally see if the bases define # inheritable model options and swipe them. for b in bases: if not hasattr(b, '_meta'): continue base_meta = b._meta if parent_pk is None: parent_pk = deepcopy(base_meta.primary_key) all_inheritable = cls.inheritable | base_meta._additional_keys for k in base_meta.__dict__: if k in all_inheritable and k not in meta_options: meta_options[k] = base_meta.__dict__[k] meta_options.setdefault('schema', base_meta.schema) for (k, v) in b.__dict__.items(): if k in attrs: continue if isinstance(v, FieldAccessor) and not v.field.primary_key: attrs[k] = deepcopy(v.field) sopts = meta_options.pop('schema_options', None) or {} Meta = meta_options.get('model_metadata_class', Metadata) Schema = meta_options.get('schema_manager_class', SchemaManager) # Construct the new class. cls = super(ModelBase, cls).__new__(cls, name, bases, attrs) cls.__data__ = cls.__rel__ = None cls._meta = Meta(cls, **meta_options) cls._schema = Schema(cls, **sopts) fields = [] for key, value in cls.__dict__.items(): if isinstance(value, Field): if value.primary_key and pk: raise ValueError('over-determined primary key %s.' % name) elif value.primary_key: pk, pk_name = value, key else: fields.append((key, value)) if pk is None: if parent_pk is not False: pk, pk_name = ((parent_pk, parent_pk.name) if parent_pk is not None else (AutoField(), 'id')) else: pk = False elif isinstance(pk, CompositeKey): pk_name = '__composite_key__' cls._meta.composite_key = True if pk is not False: cls._meta.set_primary_key(pk_name, pk) for name, field in fields: cls._meta.add_field(name, field) # Create a repr and error class before finalizing. if hasattr(cls, '__str__') and '__repr__' not in attrs: setattr(cls, '__repr__', lambda self: '<%s: %s>' % ( cls.__name__, self.__str__())) exc_name = '%sDoesNotExist' % cls.__name__ exc_attrs = {'__module__': cls.__module__} exception_class = type(exc_name, (DoesNotExist,), exc_attrs) cls.DoesNotExist = exception_class # Call validation hook, allowing additional model validation. 
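        # Illustrative sketch (BaseModel is hypothetical): subclasses can
        # override validate_model() to enforce conventions at class-creation
        # time, e.g.
        #
        #   class BaseModel(Model):
        #       @classmethod
        #       def validate_model(cls):
        #           if not cls._meta.table_name.islower():
        #               raise ValueError('table names must be lower-case')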
cls.validate_model() DeferredForeignKey.resolve(cls) return cls def __repr__(self): return '<Model: %s>' % self.__name__ def __iter__(self): return iter(self.select()) def __getitem__(self, key): return self.get_by_id(key) def __setitem__(self, key, value): self.set_by_id(key, value) def __delitem__(self, key): self.delete_by_id(key) def __contains__(self, key): try: self.get_by_id(key) except self.DoesNotExist: return False else: return True def __len__(self): return self.select().count() def __bool__(self): return True __nonzero__ = __bool__ # Python 2. def __sql__(self, ctx): return ctx.sql(self._meta.table) class _BoundModelsContext(_callable_context_manager): def __init__(self, models, database, bind_refs, bind_backrefs): self.models = models self.database = database self.bind_refs = bind_refs self.bind_backrefs = bind_backrefs def __enter__(self): self._orig_database = [] for model in self.models: self._orig_database.append(model._meta.database) model.bind(self.database, self.bind_refs, self.bind_backrefs, _exclude=set(self.models)) return self.models def __exit__(self, exc_type, exc_val, exc_tb): for model, db in zip(self.models, self._orig_database): model.bind(db, self.bind_refs, self.bind_backrefs, _exclude=set(self.models)) class Model(with_metaclass(ModelBase, Node)): def __init__(self, *args, **kwargs): if kwargs.pop('__no_default__', None): self.__data__ = {} else: self.__data__ = self._meta.get_default_dict() self._dirty = set(self.__data__) self.__rel__ = {} for k in kwargs: setattr(self, k, kwargs[k]) def __str__(self): return str(self._pk) if self._meta.primary_key is not False else 'n/a' @classmethod def validate_model(cls): pass @classmethod def alias(cls, alias=None): return ModelAlias(cls, alias) @classmethod def select(cls, *fields): is_default = not fields if not fields: fields = cls._meta.sorted_fields return ModelSelect(cls, fields, is_default=is_default) @classmethod def _normalize_data(cls, data, kwargs): normalized = {} if data: if not isinstance(data, dict): if kwargs: raise ValueError('Data cannot be mixed with keyword ' 'arguments: %s' % data) return data for key in data: try: field = (key if isinstance(key, Field) else cls._meta.combined[key]) except KeyError: if not isinstance(key, Node): raise ValueError('Unrecognized field name: "%s" in %s.' 
% (key, data)) field = key normalized[field] = data[key] if kwargs: for key in kwargs: try: normalized[cls._meta.combined[key]] = kwargs[key] except KeyError: normalized[getattr(cls, key)] = kwargs[key] return normalized @classmethod def update(cls, __data=None, **update): return ModelUpdate(cls, cls._normalize_data(__data, update)) @classmethod def insert(cls, __data=None, **insert): return ModelInsert(cls, cls._normalize_data(__data, insert)) @classmethod def insert_many(cls, rows, fields=None): return ModelInsert(cls, insert=rows, columns=fields) @classmethod def insert_from(cls, query, fields): columns = [getattr(cls, field) if isinstance(field, basestring) else field for field in fields] return ModelInsert(cls, insert=query, columns=columns) @classmethod def replace(cls, __data=None, **insert): return cls.insert(__data, **insert).on_conflict('REPLACE') @classmethod def replace_many(cls, rows, fields=None): return (cls .insert_many(rows=rows, fields=fields) .on_conflict('REPLACE')) @classmethod def raw(cls, sql, *params): return ModelRaw(cls, sql, params) @classmethod def delete(cls): return ModelDelete(cls) @classmethod def create(cls, **query): inst = cls(**query) inst.save(force_insert=True) return inst @classmethod def bulk_create(cls, model_list, batch_size=None): if batch_size is not None: batches = chunked(model_list, batch_size) else: batches = [model_list] field_names = list(cls._meta.sorted_field_names) if cls._meta.auto_increment: pk_name = cls._meta.primary_key.name field_names.remove(pk_name) if cls._meta.database.returning_clause and \ cls._meta.primary_key is not False: pk_fields = cls._meta.get_primary_keys() else: pk_fields = None fields = [cls._meta.fields[field_name] for field_name in field_names] attrs = [] for field in fields: if isinstance(field, ForeignKeyField): attrs.append(field.object_id_name) else: attrs.append(field.name) for batch in batches: accum = ([getattr(model, f) for f in attrs] for model in batch) res = cls.insert_many(accum, fields=fields).execute() if pk_fields and res is not None: for row, model in zip(res, batch): for (pk_field, obj_id) in zip(pk_fields, row): setattr(model, pk_field.name, obj_id) @classmethod def bulk_update(cls, model_list, fields, batch_size=None): if isinstance(cls._meta.primary_key, CompositeKey): raise ValueError('bulk_update() is not supported for models with ' 'a composite primary key.') # First normalize list of fields so all are field instances. fields = [cls._meta.fields[f] if isinstance(f, basestring) else f for f in fields] # Now collect list of attribute names to use for values. attrs = [field.object_id_name if isinstance(field, ForeignKeyField) else field.name for field in fields] if batch_size is not None: batches = chunked(model_list, batch_size) else: batches = [model_list] n = 0 pk = cls._meta.primary_key for batch in batches: id_list = [model._pk for model in batch] update = {} for field, attr in zip(fields, attrs): accum = [] for model in batch: value = getattr(model, attr) if not isinstance(value, Node): value = field.to_value(value) accum.append((pk.to_value(model._pk), value)) case = Case(pk, accum) update[field] = case n += (cls.update(update) .where(cls._meta.primary_key.in_(id_list)) .execute()) return n @classmethod def noop(cls): return NoopModelSelect(cls, ()) @classmethod def get(cls, *query, **filters): sq = cls.select() if query: # Handle simple lookup using just the primary key. 
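            # Illustrative usage (User/username are hypothetical):
            #
            #   User.get(User.username == 'charlie')  # expression predicate
            #   User.get(username='charlie')          # keyword filter
            #   User.get(1)                           # bare int -> primary key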
if len(query) == 1 and isinstance(query[0], int): sq = sq.where(cls._meta.primary_key == query[0]) else: sq = sq.where(*query) if filters: sq = sq.filter(**filters) return sq.get() @classmethod def get_or_none(cls, *query, **filters): try: return cls.get(*query, **filters) except DoesNotExist: pass @classmethod def get_by_id(cls, pk): return cls.get(cls._meta.primary_key == pk) @classmethod def set_by_id(cls, key, value): if key is None: return cls.insert(value).execute() else: return (cls.update(value) .where(cls._meta.primary_key == key).execute()) @classmethod def delete_by_id(cls, pk): return cls.delete().where(cls._meta.primary_key == pk).execute() @classmethod def get_or_create(cls, **kwargs): defaults = kwargs.pop('defaults', {}) query = cls.select() for field, value in kwargs.items(): query = query.where(getattr(cls, field) == value) try: return query.get(), False except cls.DoesNotExist: try: if defaults: kwargs.update(defaults) with cls._meta.database.atomic(): return cls.create(**kwargs), True except IntegrityError as exc: try: return query.get(), False except cls.DoesNotExist: raise exc @classmethod def filter(cls, *dq_nodes, **filters): return cls.select().filter(*dq_nodes, **filters) def get_id(self): # Using getattr(self, pk-name) could accidentally trigger a query if # the primary-key is a foreign-key. So we use the safe_name attribute, # which defaults to the field-name, but will be the object_id_name for # foreign-key fields. if self._meta.primary_key is not False: return getattr(self, self._meta.primary_key.safe_name) _pk = property(get_id) @_pk.setter def _pk(self, value): setattr(self, self._meta.primary_key.name, value) def _pk_expr(self): return self._meta.primary_key == self._pk def _prune_fields(self, field_dict, only): new_data = {} for field in only: if isinstance(field, basestring): field = self._meta.combined[field] if field.name in field_dict: new_data[field.name] = field_dict[field.name] return new_data def _populate_unsaved_relations(self, field_dict): for foreign_key_field in self._meta.refs: foreign_key = foreign_key_field.name conditions = ( foreign_key in field_dict and field_dict[foreign_key] is None and self.__rel__.get(foreign_key) is not None) if conditions: setattr(self, foreign_key, getattr(self, foreign_key)) field_dict[foreign_key] = self.__data__[foreign_key] def save(self, force_insert=False, only=None): field_dict = self.__data__.copy() if self._meta.primary_key is not False: pk_field = self._meta.primary_key pk_value = self._pk else: pk_field = pk_value = None if only is not None: field_dict = self._prune_fields(field_dict, only) elif self._meta.only_save_dirty and not force_insert: field_dict = self._prune_fields(field_dict, self.dirty_fields) if not field_dict: self._dirty.clear() return False self._populate_unsaved_relations(field_dict) rows = 1 if self._meta.auto_increment and pk_value is None: field_dict.pop(pk_field.name, None) if pk_value is not None and not force_insert: if self._meta.composite_key: for pk_part_name in pk_field.field_names: field_dict.pop(pk_part_name, None) else: field_dict.pop(pk_field.name, None) if not field_dict: raise ValueError('no data to save!') rows = self.update(**field_dict).where(self._pk_expr()).execute() elif pk_field is not None: pk = self.insert(**field_dict).execute() if pk is not None and (self._meta.auto_increment or pk_value is None): self._pk = pk else: self.insert(**field_dict).execute() self._dirty.clear() return rows def is_dirty(self): return bool(self._dirty) @property def 
dirty_fields(self): return [f for f in self._meta.sorted_fields if f.name in self._dirty] def dependencies(self, search_nullable=False): model_class = type(self) stack = [(type(self), None)] seen = set() while stack: klass, query = stack.pop() if klass in seen: continue seen.add(klass) for fk, rel_model in klass._meta.backrefs.items(): if rel_model is model_class or query is None: node = (fk == self.__data__[fk.rel_field.name]) else: node = fk << query subquery = (rel_model.select(rel_model._meta.primary_key) .where(node)) if not fk.null or search_nullable: stack.append((rel_model, subquery)) yield (node, fk) def delete_instance(self, recursive=False, delete_nullable=False): if recursive: dependencies = self.dependencies(delete_nullable) for query, fk in reversed(list(dependencies)): model = fk.model if fk.null and not delete_nullable: model.update(**{fk.name: None}).where(query).execute() else: model.delete().where(query).execute() return type(self).delete().where(self._pk_expr()).execute() def __hash__(self): return hash((self.__class__, self._pk)) def __eq__(self, other): return ( other.__class__ == self.__class__ and self._pk is not None and self._pk == other._pk) def __ne__(self, other): return not self == other def __sql__(self, ctx): # NOTE: when comparing a foreign-key field whose related-field is not a # primary-key, then doing an equality test for the foreign-key with a # model instance will return the wrong value; since we would return # the primary key for a given model instance. # # This checks to see if we have a converter in the scope, and that we # are converting a foreign-key expression. If so, we hand the model # instance to the converter rather than blindly grabbing the primary- # key. In the event the provided converter fails to handle the model # instance, then we will return the primary-key. 
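        # Illustrative note (User/Tweet are hypothetical): if ``Tweet.user``
        # is declared as ``ForeignKeyField(User, field=User.username)``, then
        # ``Tweet.select().where(Tweet.user == user_instance)`` should
        # serialize ``user_instance.username`` via the converter rather than
        # falling back to the instance's primary key.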
if ctx.state.converter is not None and ctx.state.is_fk_expr: try: return ctx.sql(Value(self, converter=ctx.state.converter)) except (TypeError, ValueError): pass return ctx.sql(Value(getattr(self, self._meta.primary_key.name), converter=self._meta.primary_key.db_value)) @classmethod def bind(cls, database, bind_refs=True, bind_backrefs=True, _exclude=None): is_different = cls._meta.database is not database cls._meta.set_database(database) if bind_refs or bind_backrefs: if _exclude is None: _exclude = set() G = cls._meta.model_graph(refs=bind_refs, backrefs=bind_backrefs) for _, model, is_backref in G: if model not in _exclude: model._meta.set_database(database) _exclude.add(model) return is_different @classmethod def bind_ctx(cls, database, bind_refs=True, bind_backrefs=True): return _BoundModelsContext((cls,), database, bind_refs, bind_backrefs) @classmethod def table_exists(cls): M = cls._meta return cls._schema.database.table_exists(M.table.__name__, M.schema) @classmethod def create_table(cls, safe=True, **options): if 'fail_silently' in options: __deprecated__('"fail_silently" has been deprecated in favor of ' '"safe" for the create_table() method.') safe = options.pop('fail_silently') if safe and not cls._schema.database.safe_create_index \ and cls.table_exists(): return if cls._meta.temporary: options.setdefault('temporary', cls._meta.temporary) cls._schema.create_all(safe, **options) @classmethod def drop_table(cls, safe=True, drop_sequences=True, **options): if safe and not cls._schema.database.safe_drop_index \ and not cls.table_exists(): return if cls._meta.temporary: options.setdefault('temporary', cls._meta.temporary) cls._schema.drop_all(safe, drop_sequences, **options) @classmethod def truncate_table(cls, **options): cls._schema.truncate_table(**options) @classmethod def index(cls, *fields, **kwargs): return ModelIndex(cls, fields, **kwargs) @classmethod def add_index(cls, *fields, **kwargs): if len(fields) == 1 and isinstance(fields[0], (SQL, Index)): cls._meta.indexes.append(fields[0]) else: cls._meta.indexes.append(ModelIndex(cls, fields, **kwargs)) class ModelAlias(Node): """Provide a separate reference to a model in a query.""" def __init__(self, model, alias=None): self.__dict__['model'] = model self.__dict__['alias'] = alias def __getattr__(self, attr): # Hack to work-around the fact that properties or other objects # implementing the descriptor protocol (on the model being aliased), # will not work correctly when we use getattr(). So we explicitly pass # the model alias to the descriptor's getter. try: obj = self.model.__dict__[attr] except KeyError: pass else: if isinstance(obj, ModelDescriptor): return obj.__get__(None, self) model_attr = getattr(self.model, attr) if isinstance(model_attr, Field): self.__dict__[attr] = FieldAlias.create(self, model_attr) return self.__dict__[attr] return model_attr def __setattr__(self, attr, value): raise AttributeError('Cannot set attributes on model aliases.') def get_field_aliases(self): return [getattr(self, n) for n in self.model._meta.sorted_field_names] def select(self, *selection): if not selection: selection = self.get_field_aliases() return ModelSelect(self, selection) def __call__(self, **kwargs): return self.model(**kwargs) def __sql__(self, ctx): if ctx.scope == SCOPE_VALUES: # Return the quoted table name. return ctx.sql(self.model) if self.alias: ctx.alias_manager[self] = self.alias if ctx.scope == SCOPE_SOURCE: # Define the table and its alias. 
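            # Illustrative sketch (Category is a hypothetical self-referential
            # model): self-joins rely on this aliasing, e.g.
            #
            #   Parent = Category.alias('p')
            #   query = (Category
            #            .select(Category.name, Parent.name)
            #            .join(Parent, on=(Category.parent == Parent.id)))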
return (ctx .sql(self.model._meta.entity) .literal(' AS ') .sql(Entity(ctx.alias_manager[self]))) else: # Refer to the table using the alias. return ctx.sql(Entity(ctx.alias_manager[self])) class FieldAlias(Field): def __init__(self, source, field): self.source = source self.model = source.model self.field = field @classmethod def create(cls, source, field): class _FieldAlias(cls, type(field)): pass return _FieldAlias(source, field) def clone(self): return FieldAlias(self.source, self.field) def adapt(self, value): return self.field.adapt(value) def python_value(self, value): return self.field.python_value(value) def db_value(self, value): return self.field.db_value(value) def __getattr__(self, attr): return self.source if attr == 'model' else getattr(self.field, attr) def __sql__(self, ctx): return ctx.sql(Column(self.source, self.field.column_name)) def sort_models(models): models = set(models) seen = set() ordering = [] def dfs(model): if model in models and model not in seen: seen.add(model) for foreign_key, rel_model in model._meta.refs.items(): # Do not depth-first search deferred foreign-keys as this can # cause tables to be created in the incorrect order. if not foreign_key.deferred: dfs(rel_model) if model._meta.depends_on: for dependency in model._meta.depends_on: dfs(dependency) ordering.append(model) names = lambda m: (m._meta.name, m._meta.table_name) for m in sorted(models, key=names): dfs(m) return ordering class _ModelQueryHelper(object): default_row_type = ROW.MODEL def __init__(self, *args, **kwargs): super(_ModelQueryHelper, self).__init__(*args, **kwargs) if not self._database: self._database = self.model._meta.database @Node.copy def objects(self, constructor=None): self._row_type = ROW.CONSTRUCTOR self._constructor = self.model if constructor is None else constructor def _get_cursor_wrapper(self, cursor): row_type = self._row_type or self.default_row_type if row_type == ROW.MODEL: return self._get_model_cursor_wrapper(cursor) elif row_type == ROW.DICT: return ModelDictCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.TUPLE: return ModelTupleCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.NAMED_TUPLE: return ModelNamedTupleCursorWrapper(cursor, self.model, self._returning) elif row_type == ROW.CONSTRUCTOR: return ModelObjectCursorWrapper(cursor, self.model, self._returning, self._constructor) else: raise ValueError('Unrecognized row type: "%s".' 
% row_type) def _get_model_cursor_wrapper(self, cursor): return ModelObjectCursorWrapper(cursor, self.model, [], self.model) class ModelRaw(_ModelQueryHelper, RawQuery): def __init__(self, model, sql, params, **kwargs): self.model = model self._returning = () super(ModelRaw, self).__init__(sql=sql, params=params, **kwargs) def get(self): try: return self.execute()[0] except IndexError: sql, params = self.sql() raise self.model.DoesNotExist('%s instance matching query does ' 'not exist:\nSQL: %s\nParams: %s' % (self.model, sql, params)) class BaseModelSelect(_ModelQueryHelper): def union_all(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs) __add__ = union_all def union(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs) __or__ = union def intersect(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs) __and__ = intersect def except_(self, rhs): return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs) __sub__ = except_ def __iter__(self): if not self._cursor_wrapper: self.execute() return iter(self._cursor_wrapper) def prefetch(self, *subqueries): return prefetch(self, *subqueries) def get(self, database=None): clone = self.paginate(1, 1) clone._cursor_wrapper = None try: return clone.execute(database)[0] except IndexError: sql, params = clone.sql() raise self.model.DoesNotExist('%s instance matching query does ' 'not exist:\nSQL: %s\nParams: %s' % (clone.model, sql, params)) def get_or_none(self, database=None): try: return self.get(database=database) except self.model.DoesNotExist: pass @Node.copy def group_by(self, *columns): grouping = [] for column in columns: if is_model(column): grouping.extend(column._meta.sorted_fields) elif isinstance(column, Table): if not column._columns: raise ValueError('Cannot pass a table to group_by() that ' 'does not have columns explicitly ' 'declared.') grouping.extend([getattr(column, col_name) for col_name in column._columns]) else: grouping.append(column) self._group_by = grouping class ModelCompoundSelectQuery(BaseModelSelect, CompoundSelectQuery): def __init__(self, model, *args, **kwargs): self.model = model super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs) def _get_model_cursor_wrapper(self, cursor): return self.lhs._get_model_cursor_wrapper(cursor) def _normalize_model_select(fields_or_models): fields = [] for fm in fields_or_models: if is_model(fm): fields.extend(fm._meta.sorted_fields) elif isinstance(fm, ModelAlias): fields.extend(fm.get_field_aliases()) elif isinstance(fm, Table) and fm._columns: fields.extend([getattr(fm, col) for col in fm._columns]) else: fields.append(fm) return fields class ModelSelect(BaseModelSelect, Select): def __init__(self, model, fields_or_models, is_default=False): self.model = self._join_ctx = model self._joins = {} self._is_default = is_default fields = _normalize_model_select(fields_or_models) super(ModelSelect, self).__init__([model], fields) def clone(self): clone = super(ModelSelect, self).clone() if clone._joins: clone._joins = dict(clone._joins) return clone def select(self, *fields_or_models): if fields_or_models or not self._is_default: self._is_default = False fields = _normalize_model_select(fields_or_models) return super(ModelSelect, self).select(*fields) return self def switch(self, ctx=None): self._join_ctx = self.model if ctx is None else ctx return self def _get_model(self, src): if is_model(src): return src, True elif isinstance(src, Table) and src._model: return src._model, False elif 
isinstance(src, ModelAlias): return src.model, False elif isinstance(src, ModelSelect): return src.model, False return None, False def _normalize_join(self, src, dest, on, attr): # Allow "on" expression to have an alias that determines the # destination attribute for the joined data. on_alias = isinstance(on, Alias) if on_alias: attr = attr or on._alias on = on.alias() # Obtain references to the source and destination models being joined. src_model, src_is_model = self._get_model(src) dest_model, dest_is_model = self._get_model(dest) if src_model and dest_model: self._join_ctx = dest constructor = dest_model # In the case where the "on" clause is a Column or Field, we will # convert that field into the appropriate predicate expression. if not (src_is_model and dest_is_model) and isinstance(on, Column): if on.source is src: to_field = src_model._meta.columns[on.name] elif on.source is dest: to_field = dest_model._meta.columns[on.name] else: raise AttributeError('"on" clause Column %s does not ' 'belong to %s or %s.' % (on, src_model, dest_model)) on = None elif isinstance(on, Field): to_field = on on = None else: to_field = None fk_field, is_backref = self._generate_on_clause( src_model, dest_model, to_field, on) if on is None: src_attr = 'name' if src_is_model else 'column_name' dest_attr = 'name' if dest_is_model else 'column_name' if is_backref: lhs = getattr(dest, getattr(fk_field, dest_attr)) rhs = getattr(src, getattr(fk_field.rel_field, src_attr)) else: lhs = getattr(src, getattr(fk_field, src_attr)) rhs = getattr(dest, getattr(fk_field.rel_field, dest_attr)) on = (lhs == rhs) if not attr: if fk_field is not None and not is_backref: attr = fk_field.name else: attr = dest_model._meta.name elif on_alias and fk_field is not None and \ attr == fk_field.object_id_name and not is_backref: raise ValueError('Cannot assign join alias to "%s", as this ' 'attribute is the object_id_name for the ' 'foreign-key field "%s"' % (attr, fk_field)) elif isinstance(dest, Source): constructor = dict attr = attr or dest._alias if not attr and isinstance(dest, Table): attr = attr or dest.__name__ return (on, attr, constructor) def _generate_on_clause(self, src, dest, to_field=None, on=None): meta = src._meta is_backref = fk_fields = False # Get all the foreign keys between source and dest, and determine if # the join is via a back-reference. if dest in meta.model_refs: fk_fields = meta.model_refs[dest] elif dest in meta.model_backrefs: fk_fields = meta.model_backrefs[dest] is_backref = True if not fk_fields: if on is not None: return None, False raise ValueError('Unable to find foreign key between %s and %s. ' 'Please specify an explicit join condition.' % (src, dest)) elif to_field is not None: # If the foreign-key field was specified explicitly, remove all # other foreign-key fields from the list. target = (to_field.field if isinstance(to_field, FieldAlias) else to_field) fk_fields = [f for f in fk_fields if ( (f is target) or (is_backref and f.rel_field is to_field))] if len(fk_fields) == 1: return fk_fields[0], is_backref if on is None: # If multiple foreign-keys exist, try using the FK whose name # matches that of the related model. If not, raise an error as this # is ambiguous. for fk in fk_fields: if fk.name == dest._meta.name: return fk, is_backref raise ValueError('More than one foreign key between %s and %s.' ' Please specify which you are joining on.' 
% (src, dest)) # If there are multiple foreign-keys to choose from and the join # predicate is an expression, we'll try to figure out which # foreign-key field we're joining on so that we can assign to the # correct attribute when resolving the model graph. to_field = None if isinstance(on, Expression): lhs, rhs = on.lhs, on.rhs # Coerce to set() so that we force Python to compare using the # object's hash rather than equality test, which returns a # false-positive due to overriding __eq__. fk_set = set(fk_fields) if isinstance(lhs, Field): lhs_f = lhs.field if isinstance(lhs, FieldAlias) else lhs if lhs_f in fk_set: to_field = lhs_f elif isinstance(rhs, Field): rhs_f = rhs.field if isinstance(rhs, FieldAlias) else rhs if rhs_f in fk_set: to_field = rhs_f return to_field, False @Node.copy def join(self, dest, join_type=JOIN.INNER, on=None, src=None, attr=None): src = self._join_ctx if src is None else src if join_type == JOIN.LATERAL or join_type == JOIN.LEFT_LATERAL: on = True elif join_type != JOIN.CROSS: on, attr, constructor = self._normalize_join(src, dest, on, attr) if attr: self._joins.setdefault(src, []) self._joins[src].append((dest, attr, constructor, join_type)) elif on is not None: raise ValueError('Cannot specify on clause with cross join.') if not self._from_list: raise ValueError('No sources to join on.') item = self._from_list.pop() self._from_list.append(Join(item, dest, join_type, on)) def join_from(self, src, dest, join_type=JOIN.INNER, on=None, attr=None): return self.join(dest, join_type, on, src, attr) def _get_model_cursor_wrapper(self, cursor): if len(self._from_list) == 1 and not self._joins: return ModelObjectCursorWrapper(cursor, self.model, self._returning, self.model) return ModelCursorWrapper(cursor, self.model, self._returning, self._from_list, self._joins) def ensure_join(self, lm, rm, on=None, **join_kwargs): join_ctx = self._join_ctx for dest, _, constructor, _ in self._joins.get(lm, []): if dest == rm: return self return self.switch(lm).join(rm, on=on, **join_kwargs).switch(join_ctx) def convert_dict_to_node(self, qdict): accum = [] joins = [] fks = (ForeignKeyField, BackrefAccessor) for key, value in sorted(qdict.items()): curr = self.model if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP: key, op = key.rsplit('__', 1) op = DJANGO_MAP[op] elif value is None: op = DJANGO_MAP['is'] else: op = DJANGO_MAP['eq'] if '__' not in key: # Handle simplest case. This avoids joining over-eagerly when a # direct FK lookup is all that is required. model_attr = getattr(curr, key) else: for piece in key.split('__'): for dest, attr, _, _ in self._joins.get(curr, ()): if attr == piece or (isinstance(dest, ModelAlias) and dest.alias == piece): curr = dest break else: model_attr = getattr(curr, piece) if value is not None and isinstance(model_attr, fks): curr = model_attr.rel_model joins.append(model_attr) accum.append(op(model_attr, value)) return accum, joins def filter(self, *args, **kwargs): # normalize args and kwargs into a new expression if args and kwargs: dq_node = (reduce(operator.and_, [a.clone() for a in args]) & DQ(**kwargs)) elif args: dq_node = (reduce(operator.and_, [a.clone() for a in args]) & ColumnBase()) elif kwargs: dq_node = DQ(**kwargs) & ColumnBase() else: return self.clone() # dq_node should now be an Expression, lhs = Node(), rhs = ... 
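        # Illustrative usage (Tweet and its fields are hypothetical):
        # Django-style lookups resolve through DJANGO_MAP and may introduce
        # implicit joins, e.g.
        #
        #   Tweet.filter(user__username='charlie', message__like='%peewee%')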
q = collections.deque([dq_node]) dq_joins = [] seen_joins = set() while q: curr = q.popleft() if not isinstance(curr, Expression): continue for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)): if isinstance(piece, DQ): query, joins = self.convert_dict_to_node(piece.query) for join in joins: if join not in seen_joins: dq_joins.append(join) seen_joins.add(join) expression = reduce(operator.and_, query) # Apply values from the DQ object. if piece._negated: expression = Negated(expression) #expression._alias = piece._alias setattr(curr, side, expression) else: q.append(piece) if not args or not kwargs: dq_node = dq_node.lhs query = self.clone() for field in dq_joins: if isinstance(field, ForeignKeyField): lm, rm = field.model, field.rel_model field_obj = field elif isinstance(field, BackrefAccessor): lm, rm = field.model, field.rel_model field_obj = field.field query = query.ensure_join(lm, rm, field_obj) return query.where(dq_node) def create_table(self, name, safe=True, **meta): return self.model._schema.create_table_as(name, self, safe, **meta) def __sql_selection__(self, ctx, is_subquery=False): if self._is_default and is_subquery and len(self._returning) > 1 and \ self.model._meta.primary_key is not False: return ctx.sql(self.model._meta.primary_key) return ctx.sql(CommaNodeList(self._returning)) class NoopModelSelect(ModelSelect): def __sql__(self, ctx): return self.model._meta.database.get_noop_select(ctx) def _get_cursor_wrapper(self, cursor): return CursorWrapper(cursor) class _ModelWriteQueryHelper(_ModelQueryHelper): def __init__(self, model, *args, **kwargs): self.model = model super(_ModelWriteQueryHelper, self).__init__(model, *args, **kwargs) def returning(self, *returning): accum = [] for item in returning: if is_model(item): accum.extend(item._meta.sorted_fields) else: accum.append(item) return super(_ModelWriteQueryHelper, self).returning(*accum) def _set_table_alias(self, ctx): table = self.model._meta.table ctx.alias_manager[table] = table.__name__ class ModelUpdate(_ModelWriteQueryHelper, Update): pass class ModelInsert(_ModelWriteQueryHelper, Insert): default_row_type = ROW.TUPLE def __init__(self, *args, **kwargs): super(ModelInsert, self).__init__(*args, **kwargs) if self._returning is None and self.model._meta.database is not None: if self.model._meta.database.returning_clause: self._returning = self.model._meta.get_primary_keys() def returning(self, *returning): # By default ModelInsert will yield a `tuple` containing the # primary-key of the newly inserted row. But if we are explicitly # specifying a returning clause and have not set a row type, we will # default to returning model instances instead. 
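        # Illustrative usage (User is hypothetical; requires a database with
        # a RETURNING clause, e.g. Postgres):
        #
        #   query = User.insert_many(rows).returning(User)
        #   for user in query.execute():
        #       print(user.id, user.username)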
if returning and self._row_type is None: self._row_type = ROW.MODEL return super(ModelInsert, self).returning(*returning) def get_default_data(self): return self.model._meta.defaults def get_default_columns(self): fields = self.model._meta.sorted_fields return fields[1:] if self.model._meta.auto_increment else fields class ModelDelete(_ModelWriteQueryHelper, Delete): pass class ManyToManyQuery(ModelSelect): def __init__(self, instance, accessor, rel, *args, **kwargs): self._instance = instance self._accessor = accessor self._src_attr = accessor.src_fk.rel_field.name self._dest_attr = accessor.dest_fk.rel_field.name super(ManyToManyQuery, self).__init__(rel, (rel,), *args, **kwargs) def _id_list(self, model_or_id_list): if isinstance(model_or_id_list[0], Model): return [getattr(obj, self._dest_attr) for obj in model_or_id_list] return model_or_id_list def add(self, value, clear_existing=False): if clear_existing: self.clear() accessor = self._accessor src_id = getattr(self._instance, self._src_attr) if isinstance(value, SelectQuery): query = value.columns( Value(src_id), accessor.dest_fk.rel_field) accessor.through_model.insert_from( fields=[accessor.src_fk, accessor.dest_fk], query=query).execute() else: value = ensure_tuple(value) if not value: return inserts = [{ accessor.src_fk.name: src_id, accessor.dest_fk.name: rel_id} for rel_id in self._id_list(value)] accessor.through_model.insert_many(inserts).execute() def remove(self, value): src_id = getattr(self._instance, self._src_attr) if isinstance(value, SelectQuery): column = getattr(value.model, self._dest_attr) subquery = value.columns(column) return (self._accessor.through_model .delete() .where( (self._accessor.dest_fk << subquery) & (self._accessor.src_fk == src_id)) .execute()) else: value = ensure_tuple(value) if not value: return return (self._accessor.through_model .delete() .where( (self._accessor.dest_fk << self._id_list(value)) & (self._accessor.src_fk == src_id)) .execute()) def clear(self): src_id = getattr(self._instance, self._src_attr) return (self._accessor.through_model .delete() .where(self._accessor.src_fk == src_id) .execute()) def safe_python_value(conv_func): def validate(value): try: return conv_func(value) except (TypeError, ValueError): return value return validate class BaseModelCursorWrapper(DictCursorWrapper): def __init__(self, cursor, model, columns): super(BaseModelCursorWrapper, self).__init__(cursor) self.model = model self.select = columns or [] def _initialize_columns(self): combined = self.model._meta.combined table = self.model._meta.table description = self.cursor.description self.ncols = len(self.cursor.description) self.columns = [] self.converters = converters = [None] * self.ncols self.fields = fields = [None] * self.ncols for idx, description_item in enumerate(description): column = description_item[0] dot_index = column.find('.') if dot_index != -1: column = column[dot_index + 1:] column = column.strip('")') self.columns.append(column) try: raw_node = self.select[idx] except IndexError: if column in combined: raw_node = node = combined[column] else: continue else: node = raw_node.unwrap() # Heuristics used to attempt to get the field associated with a # given SELECT column, so that we can accurately convert the value # returned by the database-cursor into a Python object. 
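            # Illustrative note (Tweet.timestamp is hypothetical): for a
            # selected column such as ``fn.MAX(Tweet.timestamp)`` the first
            # argument is a Field, so its python_value() (wrapped in
            # safe_python_value) is used to coerce the aggregate result.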
if isinstance(node, Field): if raw_node._coerce: converters[idx] = node.python_value fields[idx] = node if not raw_node.is_alias(): self.columns[idx] = node.name elif isinstance(node, ColumnBase) and raw_node._converter: converters[idx] = raw_node._converter elif isinstance(node, Function) and node._coerce: if node._python_value is not None: converters[idx] = node._python_value elif node.arguments and isinstance(node.arguments[0], Node): # If the first argument is a field or references a column # on a Model, try using that field's conversion function. # This usually works, but we use "safe_python_value()" so # that if a TypeError or ValueError occurs during # conversion we can just fall-back to the raw cursor value. first = node.arguments[0].unwrap() if isinstance(first, Entity): path = first._path[-1] # Try to look-up by name. first = combined.get(path) if isinstance(first, Field): converters[idx] = safe_python_value(first.python_value) elif column in combined: if node._coerce: converters[idx] = combined[column].python_value if isinstance(node, Column) and node.source == table: fields[idx] = combined[column] initialize = _initialize_columns def process_row(self, row): raise NotImplementedError class ModelDictCursorWrapper(BaseModelCursorWrapper): def process_row(self, row): result = {} columns, converters = self.columns, self.converters fields = self.fields for i in range(self.ncols): attr = columns[i] if attr in result: continue # Don't overwrite if we have dupes. if converters[i] is not None: result[attr] = converters[i](row[i]) else: result[attr] = row[i] return result class ModelTupleCursorWrapper(ModelDictCursorWrapper): constructor = tuple def process_row(self, row): columns, converters = self.columns, self.converters return self.constructor([ (converters[i](row[i]) if converters[i] is not None else row[i]) for i in range(self.ncols)]) class ModelNamedTupleCursorWrapper(ModelTupleCursorWrapper): def initialize(self): self._initialize_columns() attributes = [] for i in range(self.ncols): attributes.append(self.columns[i]) self.tuple_class = collections.namedtuple('Row', attributes) self.constructor = lambda row: self.tuple_class(*row) class ModelObjectCursorWrapper(ModelDictCursorWrapper): def __init__(self, cursor, model, select, constructor): self.constructor = constructor self.is_model = is_model(constructor) super(ModelObjectCursorWrapper, self).__init__(cursor, model, select) def process_row(self, row): data = super(ModelObjectCursorWrapper, self).process_row(row) if self.is_model: # Clear out any dirty fields before returning to the user. 
obj = self.constructor(__no_default__=1, **data) obj._dirty.clear() return obj else: return self.constructor(**data) class ModelCursorWrapper(BaseModelCursorWrapper): def __init__(self, cursor, model, select, from_list, joins): super(ModelCursorWrapper, self).__init__(cursor, model, select) self.from_list = from_list self.joins = joins def initialize(self): self._initialize_columns() selected_src = set([field.model for field in self.fields if field is not None]) select, columns = self.select, self.columns self.key_to_constructor = {self.model: self.model} self.src_is_dest = {} self.src_to_dest = [] accum = collections.deque(self.from_list) dests = set() while accum: curr = accum.popleft() if isinstance(curr, Join): accum.append(curr.lhs) accum.append(curr.rhs) continue if curr not in self.joins: continue is_dict = isinstance(curr, dict) for key, attr, constructor, join_type in self.joins[curr]: if key not in self.key_to_constructor: self.key_to_constructor[key] = constructor # (src, attr, dest, is_dict, join_type). self.src_to_dest.append((curr, attr, key, is_dict, join_type)) dests.add(key) accum.append(key) # Ensure that we accommodate everything selected. for src in selected_src: if src not in self.key_to_constructor: if is_model(src): self.key_to_constructor[src] = src elif isinstance(src, ModelAlias): self.key_to_constructor[src] = src.model # Indicate which sources are also dests. for src, _, dest, _, _ in self.src_to_dest: self.src_is_dest[src] = src in dests and (dest in selected_src or src in selected_src) self.column_keys = [] for idx, node in enumerate(select): key = self.model field = self.fields[idx] if field is not None: if isinstance(field, FieldAlias): key = field.source else: key = field.model else: if isinstance(node, Node): node = node.unwrap() if isinstance(node, Column): key = node.source self.column_keys.append(key) def process_row(self, row): objects = {} object_list = [] for key, constructor in self.key_to_constructor.items(): objects[key] = constructor(__no_default__=True) object_list.append(objects[key]) default_instance = objects[self.model] set_keys = set() for idx, key in enumerate(self.column_keys): # Get the instance corresponding to the selected column/value, # falling back to the "root" model instance. instance = objects.get(key, default_instance) column = self.columns[idx] value = row[idx] if value is not None: set_keys.add(key) if self.converters[idx]: value = self.converters[idx](value) if isinstance(instance, dict): instance[column] = value else: setattr(instance, column, value) # Need to do some analysis on the joins before this. for (src, attr, dest, is_dict, join_type) in self.src_to_dest: instance = objects[src] try: joined_instance = objects[dest] except KeyError: continue # If no fields were set on the destination instance then do not # assign an "empty" instance. if instance is None or dest is None or \ (dest not in set_keys and not self.src_is_dest.get(dest)): continue # If no fields were set on either the source or the destination, # then we have nothing to do here. if instance not in set_keys and dest not in set_keys \ and join_type.endswith('OUTER JOIN'): continue if is_dict: instance[attr] = joined_instance else: setattr(instance, attr, joined_instance) # When instantiating models from a cursor, we clear the dirty fields. 
for instance in object_list: if isinstance(instance, Model): instance._dirty.clear() return objects[self.model] class PrefetchQuery(collections.namedtuple('_PrefetchQuery', ( 'query', 'fields', 'is_backref', 'rel_models', 'field_to_name', 'model'))): def __new__(cls, query, fields=None, is_backref=None, rel_models=None, field_to_name=None, model=None): if fields: if is_backref: if rel_models is None: rel_models = [field.model for field in fields] foreign_key_attrs = [field.rel_field.name for field in fields] else: if rel_models is None: rel_models = [field.rel_model for field in fields] foreign_key_attrs = [field.name for field in fields] field_to_name = list(zip(fields, foreign_key_attrs)) model = query.model return super(PrefetchQuery, cls).__new__( cls, query, fields, is_backref, rel_models, field_to_name, model) def populate_instance(self, instance, id_map): if self.is_backref: for field in self.fields: identifier = instance.__data__[field.name] key = (field, identifier) if key in id_map: setattr(instance, field.name, id_map[key]) else: for field, attname in self.field_to_name: identifier = instance.__data__[field.rel_field.name] key = (field, identifier) rel_instances = id_map.get(key, []) for inst in rel_instances: setattr(inst, attname, instance) inst._dirty.clear() setattr(instance, field.backref, rel_instances) def store_instance(self, instance, id_map): for field, attname in self.field_to_name: identity = field.rel_field.python_value(instance.__data__[attname]) key = (field, identity) if self.is_backref: id_map[key] = instance else: id_map.setdefault(key, []) id_map[key].append(instance) def prefetch_add_subquery(sq, subqueries): fixed_queries = [PrefetchQuery(sq)] for i, subquery in enumerate(subqueries): if isinstance(subquery, tuple): subquery, target_model = subquery else: target_model = None if not isinstance(subquery, Query) and is_model(subquery) or \ isinstance(subquery, ModelAlias): subquery = subquery.select() subquery_model = subquery.model fks = backrefs = None for j in reversed(range(i + 1)): fixed = fixed_queries[j] last_query = fixed.query last_model = last_obj = fixed.model if isinstance(last_model, ModelAlias): last_model = last_model.model rels = subquery_model._meta.model_refs.get(last_model, []) if rels: fks = [getattr(subquery_model, fk.name) for fk in rels] pks = [getattr(last_obj, fk.rel_field.name) for fk in rels] else: backrefs = subquery_model._meta.model_backrefs.get(last_model) if (fks or backrefs) and ((target_model is last_obj) or (target_model is None)): break if not fks and not backrefs: tgt_err = ' using %s' % target_model if target_model else '' raise AttributeError('Error: unable to find foreign key for ' 'query: %s%s' % (subquery, tgt_err)) dest = (target_model,) if target_model else None if fks: expr = reduce(operator.or_, [ (fk << last_query.select(pk)) for (fk, pk) in zip(fks, pks)]) subquery = subquery.where(expr) fixed_queries.append(PrefetchQuery(subquery, fks, False, dest)) elif backrefs: expressions = [] for backref in backrefs: rel_field = getattr(subquery_model, backref.rel_field.name) fk_field = getattr(last_obj, backref.name) expressions.append(rel_field << last_query.select(fk_field)) subquery = subquery.where(reduce(operator.or_, expressions)) fixed_queries.append(PrefetchQuery(subquery, backrefs, True, dest)) return fixed_queries def prefetch(sq, *subqueries): if not subqueries: return sq fixed_queries = prefetch_add_subquery(sq, subqueries) deps = {} rel_map = {} for pq in reversed(fixed_queries): query_model = pq.model if 
pq.fields: for rel_model in pq.rel_models: rel_map.setdefault(rel_model, []) rel_map[rel_model].append(pq) deps.setdefault(query_model, {}) id_map = deps[query_model] has_relations = bool(rel_map.get(query_model)) for instance in pq.query: if pq.fields: pq.store_instance(instance, id_map) if has_relations: for rel in rel_map[query_model]: rel.populate_instance(instance, deps[rel.model]) return list(pq.query)
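A minimal usage sketch for the prefetch() helper above, assuming two hypothetical models, User and Tweet, where Tweet.user is a ForeignKeyField(User, backref='tweets'); the field names in the loop body are illustrative only:

# One query per model; related rows are stitched onto their parents in memory
# by PrefetchQuery.populate_instance(), so the inner loop issues no extra queries.
users = prefetch(User.select(), Tweet.select())
for user in users:
    for tweet in user.tweets:
        print(user.username, tweet.message)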
py
1a446847fab16756e5c5f1753b48dac83b85f4ab
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _


def get_data():
    return [
        {
            "module_name": "ERPNext Turkish",
            "color": "grey",
            "icon": "octicon octicon-file-directory",
            "type": "module",
            "label": _("ERPNext Turkish")
        }
    ]
py
1a4468add36188d8e85bf8679a0a54b5a0befcfe
import logging, sys, time


class Logger:
    def __init__(self):
        self.activatedLogger = False

    def animation(self, string=None):
        """Print an optional message followed by three animated dots."""
        if string:
            sys.stdout.write(string)
            sys.stdout.flush()
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(0.8)
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(0.8)
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(1)
        print("\n")

    def activateLogger(self):
        self.activatedLogger = True
        return self

    def logprint(self, content, animated=False, clog=True):
        if animated and clog:
            self.animation(content)
        elif clog:
            print(content)
        if self.activatedLogger:
            logging.info(content)


try:
    logging.basicConfig(format='%(message)s', filename='logs/datafarm.log', level=logging.INFO)
    globalLogger = Logger().activateLogger().logprint
except FileNotFoundError:
    print("No `logs` folder found. No logs will be stored...")
    time.sleep(3)
    globalLogger = Logger().logprint
py
1a44698b81acd386a725cd194e118bb5d5f6b364
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging


def create_logger(name, log_file=None):
    """
    use different log level for file and stream
    """
    l = logging.getLogger(name)
    formatter = logging.Formatter('[%(asctime)s] %(message)s')
    l.setLevel(logging.DEBUG)

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    sh.setLevel(logging.INFO)
    l.addHandler(sh)

    if log_file is not None:
        fh = logging.FileHandler(log_file)
        fh.setFormatter(formatter)
        fh.setLevel(logging.DEBUG)
        l.addHandler(fh)

    return l


if __name__ == '__main__':
    logger = create_logger('test')
    logger = create_logger('test', 'log.txt')
    logger.info('output to file and stream')
    logger.debug('output to file')
py
1a4469aaebd763b83a66cb75a96b8687448f7a5d
from django import forms from .models import Address class AddressForm(forms.ModelForm): """ User-related CRUD form """ class Meta: model = Address fields = [ 'nickname', 'name', #'billing_profile', 'address_type', 'address_line_1', 'address_line_2', 'city', 'country', 'state', 'postal_code' ] class AddressCheckoutForm(forms.ModelForm): """ User-related checkout address create form """ class Meta: model = Address fields = [ 'nickname', 'name', #'billing_profile', #'address_type', 'address_line_1', 'address_line_2', 'city', 'country', 'state', 'postal_code' ]
py
1a446a09662d5257201c795079892dce600e14c3
from boggle import Boggle from flask import Flask, render_template, session, jsonify, request # from flask_debugtoolbar import DebugToolbarExtension app = Flask(__name__) app.config["SECRET_KEY"] = "boggleSecretKey99" # debug = DebugToolbarExtension(app) boggle_game = Boggle() @app.route('/') def landing_page(): """Displays the homepage""" return render_template('home.html', css='home.css') @app.route('/game') def game_board(): """Handles displaying the game itself""" board = boggle_game.make_board() session['board'] = board games = session.get('games', 0) high_score = session.get('high-score', 0) return render_template('game_board.html', css='game_board.css', games=games, high_score=high_score) @app.route('/rules-gameplay') def rules_gameplay_page(): """Handles the rules and game play page""" return render_template('rules.html', css='rules.css') @app.route('/game/word-guess') def check_word(): """Checks if the word submitted exists in the words file""" word = request.args['word'] res = {"result": boggle_game.check_valid_word(session['board'], word)} return jsonify(res) @app.route('/game/update', methods=["POST"]) def update_scores(): """Handles updating the games played, and checking/updating of the high score""" games = session.get('games', 0) high_score = session.get('high-score', 0) score = request.json['score'] session['games'] = games + 1 session['high-score'] = max(score, high_score) return jsonify(new_record=score > high_score)
py
1a446a9b69ed95a69270091c7124c1c2c140a28e
# -*- coding: utf-8 -*- """ @author: Miguel Ángel López Robles """ #from PyDBOD import loop import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc #from PyDBOD.ldof import LDOF import sys sys.path.append("..") from ldof import LDOF from load import load_data ######################## ### test with data generated ################## np.random.seed(42) # Generate train data X_inliers = 0.3 * np.random.randn(100, 2) X_inliers = np.r_[X_inliers + 2, X_inliers - 2] # Generate some outliers X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) X = np.r_[X_inliers, X_outliers] n_outliers = len(X_outliers) ground_truth = np.ones(len(X), dtype=int) ground_truth[-n_outliers:] = -1 # use my class ldof = LDOF() coef = ldof.fit_predict(X) #print(coef) y = np.zeros(200,dtype=np.int) y_outlier = np.ones(20,dtype=np.int) y = np.append(y, y_outlier) color = np.array(['k','b']) plt.title("Local Distance-based Outlier Factor (LDOF)") plt.scatter(X[:, 0], X[:, 1], color=color[y], s=3., label='Data points') # plot circles with radius proportional to the outlier scores radius = (coef - coef.min()) / (coef.max() - coef.min()) plt.scatter(X[:, 0], X[:, 1], s=500 * coef, edgecolors='r', facecolors='none', label='Outlier scores') plt.axis('tight') plt.xlim((-5, 5)) plt.ylim((-5, 5)) #plt.xlabel("prediction errors: %d" % (n_errors)) legend = plt.legend(loc='upper left') legend.legendHandles[0]._sizes = [10] legend.legendHandles[1]._sizes = [20] plt.show() y = np.zeros(200) y_outlier = np.ones(20) y = np.append(y, y_outlier) fpr, tpr, _ = roc_curve(y,coef) roc_auc = auc(fpr, tpr) print(roc_auc) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('LDOF') plt.legend(loc="lower right") plt.show() import os os.chdir("..") ############################### ## load a file ############# data = load_data("./data/shuttle-c0-vs-c4.dat") # k = 20 #data = load_data("./data/glass5.dat", sep = ', ') #k=19 #data = load_data("./data/ecoli-0-1-3-7_vs_2-6.dat") #k=25 #data = load_data("./data/yeast5.dat", sep = ', ') #65,65 ldof = LDOF(k=240) coef = ldof.fit_predict(data[:,:-1]) coef_n = (coef - coef.min()) / (coef.max() - coef.min()) #print(coef) #print(coef_n) fpr, tpr, _ = roc_curve(data[:,-1],coef_n) roc_auc = auc(fpr, tpr) print(roc_auc) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('LDOF') plt.legend(loc="lower right") plt.show()
py
1a446aae684200860ad0332a5b14413b2a2d69cb
""" The CharacteristicsHandler will receive a file path, read out its characteristics as needed and return a dictionary with them. More functions can will be added in the future. Tip for usage: import characteristicshandler.CharacteristicsHandler as chan chars = chan.handle_file_path("/path/to/file.hi") """ import os from datetime import datetime class CharacteristicsHandler: """ Class to read out file properties. """ @staticmethod def handle_file_path(file_path: str): # (str) -> Dict[str, str] """ Function to receive a file string and return its characteristics as a dictionary. To be treated as a class, maybe will become one in the future. """ chars = { "name": '', "extension": '', "orig_name": '', "entry_date": '', "keywords": '', # e.g. "word1, word2, word3" to be used with "in" "read_last": '', "updated_last": ''} file_name = file_path.split(os.sep)[-1] chars['orig_name'] = chars['name'] = file_name chars['read_last'] = chars['updated_last'] = chars['entry_date'] = str( datetime.now()) split_name = file_name.split('.') if len(split_name) > 1: chars['extension'] = split_name[-1] return chars if __name__ == '__main__': os.system('touch test test.txt test.testing.tested.txt') chan = CharacteristicsHandler() print(chan.handle_file_path('test')) print(chan.handle_file_path('test.txt')) print(chan.handle_file_path('test.testing.tested.txt')) os.system('rm test test.txt test.testing.tested.txt')
py
1a446ab30e2d44820987e86db69c60eb2bfb2bcb
from copy import deepcopy from typing import Union, Dict, Any, List from checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes from checkov.common.graph.graph_builder.utils import calculate_hash, join_trimmed_strings from checkov.common.graph.graph_builder.variable_rendering.breadcrumb_metadata import BreadcrumbMetadata class Block: def __init__( self, name: str, config: Dict[str, Any], path: str, block_type: str, attributes: Dict[str, Any], id: str = "", source: str = "", ) -> None: """ :param name: unique name given to the block, for example :param config: the section in tf_definitions that belong to this block :param path: the file location of the block :param block_type: str :param attributes: dictionary of the block's original attributes in the origin file """ self.name = name self.config = deepcopy(config) self.path = path self.block_type = block_type self.attributes = attributes self.id = id self.source = source self.changed_attributes: Dict[str, List[Any]] = {} self.breadcrumbs: Dict[str, List[Dict[str, Any]]] = {} attributes_to_add = self._extract_inner_attributes() self.attributes.update(attributes_to_add) def _extract_inner_attributes(self) -> Dict[str, Any]: attributes_to_add = {} for attribute_key in self.attributes: attribute_value = self.attributes[attribute_key] if isinstance(attribute_value, dict) or (isinstance(attribute_value, list) and len(attribute_value) > 0 and isinstance(attribute_value[0], dict)): inner_attributes = get_inner_attributes(attribute_key, attribute_value) attributes_to_add.update(inner_attributes) return attributes_to_add def __str__(self) -> str: return f"{self.block_type}: {self.name}" def get_attribute_dict(self) -> Dict[str, Any]: """ :return: map of all the block's native attributes (from the source file), combined with the attributes generated by the module builder. If the attributes are not a primitive type, they are converted to strings. """ base_attributes = self.get_base_attributes() self.get_origin_attributes(base_attributes) if self.changed_attributes: # add changed attributes only for calculating the hash base_attributes["changed_attributes"] = sorted(self.changed_attributes.keys()) if self.breadcrumbs: sorted_breadcrumbs = dict(sorted(self.breadcrumbs.items())) base_attributes[CustomAttributes.RENDERING_BREADCRUMBS] = sorted_breadcrumbs base_attributes[CustomAttributes.HASH] = calculate_hash(base_attributes) if "changed_attributes" in base_attributes: # removed changed attributes if it was added previously for calculating hash. 
del base_attributes["changed_attributes"] return base_attributes def get_origin_attributes(self, base_attributes: Dict[str, Any]) -> None: for attribute_key in list(self.attributes.keys()): attribute_value = self.attributes[attribute_key] if isinstance(attribute_value, list) and len(attribute_value) == 1: attribute_value = attribute_value[0] if isinstance(attribute_value, (list, dict)): inner_attributes = get_inner_attributes(attribute_key, attribute_value) base_attributes.update(inner_attributes) if attribute_key == "self": base_attributes["self_"] = attribute_value continue else: base_attributes[attribute_key] = attribute_value def get_hash(self) -> str: attributes_dict = self.get_attribute_dict() return attributes_dict.get(CustomAttributes.HASH, "") def update_attribute( self, attribute_key: str, attribute_value: Any, change_origin_id: int, previous_breadcrumbs: List[BreadcrumbMetadata], attribute_at_dest: str ) -> None: if not previous_breadcrumbs or previous_breadcrumbs[-1].vertex_id != change_origin_id: previous_breadcrumbs.append(BreadcrumbMetadata(change_origin_id, attribute_at_dest)) self.update_inner_attribute(attribute_key, self.attributes, attribute_value) attribute_key_parts = attribute_key.split(".") if len(attribute_key_parts) == 1: self.attributes[attribute_key] = attribute_value self.changed_attributes[attribute_key] = previous_breadcrumbs return for i in range(len(attribute_key_parts)): key = join_trimmed_strings(char_to_join=".", str_lst=attribute_key_parts, num_to_trim=i) if key.find(".") > -1: self.attributes[key] = attribute_value attribute_value = {attribute_key_parts[len(attribute_key_parts) - 1 - i]: attribute_value} self.changed_attributes[key] = previous_breadcrumbs def update_inner_attribute( self, attribute_key: str, nested_attributes: Union[List[Any], Dict[str, Any]], value_to_update: Any ) -> None: split_key = attribute_key.split(".") i = 1 curr_key = ".".join(split_key[0:i]) if isinstance(nested_attributes, list): if curr_key.isnumeric(): curr_key_int = int(curr_key) if curr_key_int < len(nested_attributes): if not isinstance(nested_attributes[curr_key_int], dict): nested_attributes[curr_key_int] = value_to_update else: self.update_inner_attribute( ".".join(split_key[i:]), nested_attributes[curr_key_int], value_to_update ) else: for inner in nested_attributes: self.update_inner_attribute(curr_key, inner, value_to_update) elif isinstance(nested_attributes, dict): while curr_key not in nested_attributes and i <= len(split_key): i += 1 curr_key = ".".join(split_key[0:i]) if attribute_key in nested_attributes.keys(): nested_attributes[attribute_key] = value_to_update if len(split_key) == 1 and len(curr_key) > 0: nested_attributes[curr_key] = value_to_update elif curr_key in nested_attributes.keys(): self.update_inner_attribute(".".join(split_key[i:]), nested_attributes[curr_key], value_to_update) def get_export_data(self) -> Dict[str, Union[bool, str]]: return {"type": self.block_type, "name": self.name, "path": self.path} def get_base_attributes(self) -> Dict[str, Union[str, List[str], Dict[str, Any]]]: return { CustomAttributes.BLOCK_NAME: self.name, CustomAttributes.BLOCK_TYPE: self.block_type, CustomAttributes.FILE_PATH: self.path, CustomAttributes.CONFIG: self.config, CustomAttributes.LABEL: str(self), CustomAttributes.ID: self.id, CustomAttributes.SOURCE: self.source, } def get_inner_attributes(attribute_key: str, attribute_value: Union[str, List[str], Dict[str, Any]]) -> Dict[str, Any]: inner_attributes: Dict[str, Any] = {} if isinstance(attribute_value, 
list) and len(attribute_value) == 1: attribute_value = attribute_value[0] if isinstance(attribute_value, (dict, list)): inner_attributes[attribute_key] = [None] * len(attribute_value) if isinstance(attribute_value, list) else {} iterator: Union[range, List[str]] = range(len(attribute_value)) if isinstance(attribute_value, list) else list(attribute_value.keys()) for key in iterator: if key != "": inner_key = f"{attribute_key}.{key}" inner_value = attribute_value[key] inner_attributes.update(get_inner_attributes(inner_key, inner_value)) inner_attributes[attribute_key][key] = inner_attributes[inner_key] else: del attribute_value[key] else: inner_attributes[attribute_key] = attribute_value return inner_attributes
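A small illustration of the dotted-key flattening performed by get_inner_attributes() (and used by Block._extract_inner_attributes); the attribute name and values are made up for the example:

# Nested containers are kept as-is and additionally exposed under "parent.child" keys.
flattened = get_inner_attributes("tags", {"Name": "my-bucket", "Env": "prod"})
# flattened == {
#     "tags": {"Name": "my-bucket", "Env": "prod"},
#     "tags.Name": "my-bucket",
#     "tags.Env": "prod",
# }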
py
1a446b443876ac408cf09dab75c21221289eb3d5
# Python3
# Restricted modification area
class Functions(object):
    @staticmethod
    def sign(x):
        return 1 if x > 0 else (-1 if x else 0)


def sign(x):
    return Functions.sign(x)
py
1a446ba5f3771a1ec63a894fd726f957114a53e4
# -*- coding: utf-8 -*- # ***************************************************************************** # NICOS, the Networked Instrument Control System of the MLZ # Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS) # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Module authors: # Jens Krüger <[email protected]> # # ***************************************************************************** """Some devices to simulate the PGAA hardware devices.""" from nicos.core import Attach, Override, Param, Readable class PushReader(Readable): """Read back device for the sample pusher sensors. Since one of the sensors must give the inverse of the `moveable` value this will be achieved by setting the parameter `inverse` at the corresponding device in configuration. """ hardware_access = False attached_devices = { 'moveable': Attach('Active device', Readable), } parameters = { 'inverse': Param('Invert read value', type=bool, default=False), } parameter_overrides = { 'unit': Override(default='', mandatory=False), 'fmtstr': Override(default='%d'), } mapping = { 'up': 0, 'down': 1, } fallback = -1 def doRead(self, maxage=0): if self.inverse: return not self._readRaw(maxage) return self._readRaw(maxage) def _readRaw(self, maxage=0): val = self._attached_moveable.read(maxage) return self.mapping.get(val, self.fallback)
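A sketch of the configuration the PushReader docstring refers to, as it might appear in a NICOS setup file; the device names and the module path are assumptions:

devices = dict(
    # Both read-backs follow the same pusher; the second one reports the
    # logical inverse of the mapped value, as the real hardware sensor would.
    sensor_up=device('nicos_mlz.pgaa.devices.PushReader',  # module path assumed
                     moveable='pusher',
                     inverse=False),
    sensor_down=device('nicos_mlz.pgaa.devices.PushReader',
                       moveable='pusher',
                       inverse=True),
)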
py
1a446bd3867bfb945f0e4b575d9fb743f8caf518
# -*- coding: utf-8 -*-
"""Language Tour: Generators"""
from typing import List, Tuple, Set, Generator, Dict, Iterable, Iterator

if __name__ == "__main__":
    # Ternary compare
    val: int = 32
    print(val if val >= 0 else -val)

    # List
    var_list: List[int] = [i for i in range(20) if i % 3 > 0]
    # => [1, 2, 4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19]
    var_list: List[Tuple[int]] = [(i, j) for i in range(2) for j in range(3)]
    # => [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]

    # Set
    var_set: Set[int] = {n**2 for n in range(12)}

    # Dict
    var_dict: Dict[int, int] = {n: n**2 for n in range(6)}
    # => {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25}

    # Generator/Iterable/Iterator
    G: Generator[int, None, None] = (n**2 for n in range(12))
    G: Iterable[int] = (n**2 for n in range(12))  # Implied
    G: Iterator[int] = (n**2 for n in range(12))  # Equivalent
    list(G)  # => [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121]
    list(G)  # => []  # Because a generator can only be iterated once!
    # NOTE: The type hint here reads: [YieldType, SendType, ReturnType]
    # Which one to use depends on how the function is consumed.

    def gen() -> Iterable[int]:
        """Generates x^2 from x=0 to x=11."""
        for idx in range(12):
            yield idx**2

    # Instead of returning a single value,
    # we yield several of them
    print(*gen())
    # => 0 1 4 9 16 25 36 49 64 81 100 121

    # Example function
    def gen_primes(max_range: int) -> Iterable[int]:
        """Generate primes up to max_range"""
        primes = set()
        for idx in range(2, max_range):
            if all(idx % p > 0 for p in primes):
                primes.add(idx)
                yield idx

    print(*gen_primes(100))
    # => 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47 53 59 61 67 71 73 79 83 89 97

    for prime in gen_primes(100):
        print(prime)
    # 2
    # 3
    # 5
    # 7
    # 11
    # 13
    # 17
    # 19
    # 23
    # 29
    # 31
    # 37
    # 41
    # 43
    # 47
    # 53
    # 59
    # 61
    # 67
    # 71
    # 73
    # 79
    # 83
    # 89
    # 97
py
1a446be5d809c60ebc928c2435a267e8786ea570
from django import forms

from .models import Topic, Entry


class TopicForm(forms.ModelForm):
    class Meta:
        model = Topic
        fields = ['name', 'text']
        labels = {
            # 'name': 'Topic name',
            'text': 'SUMMARY'
        }
        widgets = {'text': forms.Textarea(attrs={'cols': 50})}


class EntryForm(forms.ModelForm):
    class Meta:
        model = Entry
        fields = ['text']
        labels = {'text': ''}
        widgets = {'text': forms.Textarea(attrs={'cols': 50})}
py
1a446c8c068013a7a753329855788fde30a3a651
# This file is part of Indico. # Copyright (C) 2002 - 2019 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from __future__ import unicode_literals from indico.core.db.sqlalchemy import db from indico.modules.events.models.persons import PersonLinkBase from indico.util.string import format_repr, return_ascii class SessionBlockPersonLink(PersonLinkBase): """Association between EventPerson and SessionBlock. Also known as a 'session convener' """ __tablename__ = 'session_block_person_links' __auto_table_args = {'schema': 'events'} person_link_backref_name = 'session_block_links' person_link_unique_columns = ('session_block_id',) object_relationship_name = 'session_block' session_block_id = db.Column( db.Integer, db.ForeignKey('events.session_blocks.id'), index=True, nullable=False ) # relationship backrefs: # - session_block (SessionBlock.person_links) @return_ascii def __repr__(self): return format_repr(self, 'id', 'person_id', 'session_block_id', _text=self.full_name)
py
1a446ce7983dde5c73c0e2af4594fa3ed58ec5b2
# Copyright (C) 2020 University of Oxford # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import pickle import netCDF4 import numpy as np import pandas as pd from requests import get # opening netCDF4 files via url is not reliable # (it requires the package to be built with OPenDAP support) # we dowload and write to disk the file before opening it def download_MET_file(url, file_name): try: os.remove(file_name) except: pass # dowload the file from url and save it on disk # get request response = get(url) if response.status_code != 200: return False # open in binary mode with open(file_name, "wb") as file: # write to file file.write(response.content) file.close() return True def load_local_data(): # load the variables dict with open("plugins/WEATHER/input/weather_indicators.json", "r") as read_file: weather_indicators = json.load(read_file) # load grid to GADM level 1 dict with open('plugins/WEATHER/input/adm_1_info.pkl', 'rb') as handle: adm_1_info = pickle.load(handle) # load grid to GADM level 2 dict with open('plugins/WEATHER/input/adm_2_info.pkl', 'rb') as handle: adm_2_info = pickle.load(handle) return weather_indicators, adm_1_info, adm_2_info # dowload the weather data for a single variable for all days in daterange # use the adm_1_info and adm_2_info to assign each point in the grid to the right # GID at level 1 or 2. 
the dicts also contains the GADM informations on each GID # returns a pandas dataframe def create_aggr_df(indicator, day, variables, adm_1_info, adm_2_info, logger): source = [] date = [] gid = [] country = [] countrycode = [] adm_area_1 = [] adm_area_2 = [] adm_area_3 = [] avg = [] std = [] samplesize = [] valid_percentage = [] logger.debug("downloading data for {} for {}".format(indicator, day.strftime('%Y-%m-%d'))) URL = "https://metdatasa.blob.core.windows.net/covid19-response/metoffice_global_daily/" temp_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'netCDF4_file.nc') if not download_MET_file("{}{}/{}{}.nc".format(URL, variables[indicator]['folder'], variables[indicator]['file'], day.strftime('%Y%m%d')), file_name=temp_file): return None nc = netCDF4.Dataset(temp_file) data = nc.variables[variables[indicator]['variable']][:].data.reshape(-1) if 'cloudaltitude' in indicator: # remove default values 9*10^36 data[data > 10e20] = np.nan # Level 1 aggregation for area_0 in adm_1_info: for area_1 in adm_1_info[area_0]: idx_list = [point[0] for point in adm_1_info[area_0][area_1]["points"]] to_avg = [data[idx] for idx in idx_list] samplesize.append(len(to_avg)) source.append("MET") date.append(day.strftime('%Y-%m-%d')) gid.append(adm_1_info[area_0][area_1]["gid"]) country.append(adm_1_info[area_0][area_1]["country"]) countrycode.append(adm_1_info[area_0][area_1]["countrycode"]) adm_area_1.append(adm_1_info[area_0][area_1]["adm_area_1"]) adm_area_2.append(adm_1_info[area_0][area_1]["adm_area_2"]) adm_area_3.append(adm_1_info[area_0][area_1]["adm_area_3"]) if 'cloudaltitude' in indicator: avg.append(np.nanmean(to_avg)) std.append(np.nanstd(to_avg, ddof=1)) valid_percentage.append(((~np.isnan(to_avg)).sum()) / (len(to_avg))) else: avg.append(np.mean(to_avg)) std.append(np.std(to_avg, ddof=1)) # Level 2 aggregation for area_0 in adm_2_info: for area_1 in adm_2_info[area_0]: for area_2 in adm_2_info[area_0][area_1]: idx_list = [point[0] for point in adm_2_info[area_0][area_1][area_2]["points"]] to_avg = [data[idx] for idx in idx_list] samplesize.append(len(to_avg)) source.append("MET") date.append(day.strftime('%Y-%m-%d')) gid.append(adm_2_info[area_0][area_1][area_2]["gid"]) country.append(adm_2_info[area_0][area_1][area_2]["country"]) countrycode.append(adm_2_info[area_0][area_1][area_2]["countrycode"]) adm_area_1.append(adm_2_info[area_0][area_1][area_2]["adm_area_1"]) adm_area_2.append(adm_2_info[area_0][area_1][area_2]["adm_area_2"]) adm_area_3.append(adm_2_info[area_0][area_1][area_2]["adm_area_3"]) if 'cloudaltitude' in indicator: avg.append(np.nanmean(to_avg)) std.append(np.nanstd(to_avg, ddof=1)) valid_percentage.append(((~np.isnan(to_avg)).sum()) / (len(to_avg))) else: avg.append(np.mean(to_avg)) std.append(np.std(to_avg, ddof=1)) if 'cloudaltitude' in indicator: d = {'source': source, 'date': date, 'gid': gid, 'country': country, 'countrycode': countrycode, 'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': adm_area_3, 'samplesize': samplesize, indicator+'_valid': valid_percentage, indicator+'_avg': avg, indicator+'_std': std, } else: d = {'source': source, 'date': date, 'gid': gid, 'country': country, 'countrycode': countrycode, 'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': adm_area_3, 'samplesize': samplesize, indicator+'_avg': avg, indicator+'_std': std, } try: os.remove(temp_file) except: pass return pd.DataFrame(data=d)
py
1a446d1fea88005552df605b62e0848b1b0c965c
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Correctness tests for tf.keras using DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized import numpy as np import six from tensorflow.python import keras from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import tpu_strategy from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import random_seed from tensorflow.python.keras.distribute import distributed_training_utils from tensorflow.python.util import nest _RANDOM_SEED = 1337 _EVAL_STEPS = 20 _GLOBAL_BATCH_SIZE = 64 # Note: Please make sure the tests in this file are also covered in # keras_backward_compat_test for features that are supported with both APIs. all_strategies = [ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, strategy_combinations.tpu_strategy, # steps_per_run=2 strategy_combinations.tpu_strategy_one_step, ] def eager_mode_test_configuration(): return combinations.combine( mode='eager', use_numpy=[True, False], use_validation_data=[True, False]) def graph_mode_test_configuration(): return combinations.combine( mode='graph', use_numpy=[True, False], use_validation_data=[True, False]) def all_strategy_and_input_config_combinations(): return (combinations.times( combinations.combine( distribution=all_strategies, run_distributed=[True, False]), eager_mode_test_configuration() + graph_mode_test_configuration())) def strategy_minus_tpu_and_input_config_combinations_eager(): return (combinations.times( combinations.combine( distribution=strategy_combinations.strategies_minus_tpu), eager_mode_test_configuration())) def strategies_for_embedding_models(): """Returns distribution strategies to test for embedding models. Since embedding models take longer to train, we disregard DefaultStrategy in order to prevent testing timeouts. 
""" return [ s for s in all_strategies if s.required_tpu or s.required_gpus or s is strategy_combinations.one_device_strategy ] def test_combinations_for_embedding_model(): # TODO(sourabhbajaj): Enable tests for eager mode eager_mode_strategies = [ s for s in strategies_for_embedding_models() if not s.required_tpu ] return (combinations.times( combinations.combine( distribution=strategies_for_embedding_models(), run_distributed=[True, False]), (graph_mode_test_configuration())) + combinations.times( combinations.combine( distribution=eager_mode_strategies, run_distributed=[False]), (eager_mode_test_configuration()))) def test_combinations_with_tpu_strategies(): tpu_strategies = [ strategy_combinations.tpu_strategy, strategy_combinations.tpu_strategy_one_step ] return (combinations.times( combinations.combine(distribution=tpu_strategies), graph_mode_test_configuration())) class MaybeDistributionScope(object): """Provides a context allowing no distribution strategy.""" def __init__(self, distribution): self._distribution = distribution self._scope = None def __enter__(self): if self._distribution: self._scope = self._distribution.scope() self._scope.__enter__() def __exit__(self, exc_type, value, traceback): if self._distribution: self._scope.__exit__(exc_type, value, traceback) self._scope = None def batch_wrapper(dataset, batch_size, repeat=None): if repeat: dataset = dataset.repeat(repeat) return dataset.batch(batch_size) def get_batch_size(global_batch_size, distribution): batch_size = global_batch_size # TODO(b/118776054): Use global batch size for Keras/DS support. use_per_core_batch_size = ( distribution and not distributed_training_utils.global_batch_size_supported(distribution)) if use_per_core_batch_size: batch_size //= distribution.num_replicas_in_sync return batch_size def get_data_size(data): """Gets the size of data in list, tuple, dict, or a numpy array.""" assert isinstance(data, (np.ndarray, list, dict, tuple)) if isinstance(data, np.ndarray): return len(data) if isinstance(data, (list, tuple)): return len(data[0]) return len(six.next(six.itervalues(data))) def get_shapes(data): shapes = None if all(hasattr(x, 'shape') for x in nest.flatten(data)): shapes = nest.map_structure(lambda x: x.shape, data) return shapes def get_correctness_test_inputs(use_numpy, use_validation_data, with_distribution, x_train, y_train, x_eval, y_eval, x_predict, training_epochs): """Generates the inputs for correctness check when enable Keras with DS.""" global_batch_size = _GLOBAL_BATCH_SIZE batch_size = get_batch_size(global_batch_size, with_distribution) if use_numpy: training_inputs = { 'batch_size': batch_size, 'x': x_train, 'y': y_train, 'epochs': training_epochs, 'shuffle': False, } if use_validation_data: eval_inputs = None training_inputs['validation_data'] = (x_eval, y_eval) else: eval_inputs = { 'batch_size': batch_size, 'x': x_eval, 'y': y_eval, } predict_inputs = {'x': x_predict} else: training_data_size = get_data_size(x_train) # For dataset inputs, we do not pass batch_size to # keras.fit/evaluate/predict. The batch size is part of the dataset. 
train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train)) x = batch_wrapper(train_dataset, batch_size, repeat=training_epochs) steps_per_epoch = int(np.ceil(1.0 * training_data_size / global_batch_size)) training_inputs = { 'batch_size': None, 'x': x, 'y': None, 'epochs': training_epochs, 'shuffle': False, 'steps_per_epoch': steps_per_epoch } if use_validation_data: eval_inputs = None # Remove the eval_inputs eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval)) x = batch_wrapper(eval_dataset, batch_size) training_inputs['validation_data'] = x training_inputs['validation_steps'] = 5 else: eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_eval, y_eval)) x = batch_wrapper(eval_dataset, batch_size) eval_steps = int(np.ceil(1.0 * get_data_size(x_eval) / global_batch_size)) eval_inputs = { 'batch_size': None, 'x': x, 'y': None, 'steps': eval_steps, } predict_batch_size = get_batch_size( get_data_size(x_predict), with_distribution) predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict) predict_dataset = batch_wrapper(predict_dataset, predict_batch_size) predict_inputs = { 'steps': 1, 'x': predict_dataset, } return training_inputs, eval_inputs, predict_inputs def fit_eval_and_predict(initial_weights, input_fn, model_fn, run_distributed=None, distribution=None, is_stateful_model=False): """Generates results for fit/predict/evaluate for given model.""" training_inputs, eval_inputs, predict_inputs = input_fn() model = model_fn( run_distributed=run_distributed, initial_weights=initial_weights, distribution=distribution, input_shapes=get_shapes(training_inputs['x'])) result = {} result['training_history_1'] = model.fit(**training_inputs).history if eval_inputs is not None: result['eval_result_1'] = model.evaluate(**eval_inputs) result['weights_1'] = model.get_weights() if predict_inputs is not None: # Check correctness of the result of predict() invoked # multiple times -- as for stateful models, result of # predict may differ for each batch. predict_length = 1 if is_stateful_model: predict_length = 3 for i in range(predict_length): result_key = 'predict_result_{}'.format(i) result[result_key] = model.predict(**predict_inputs) # Train and eval again to mimic user's flow. result['training_history_2'] = model.fit(**training_inputs).history if eval_inputs is not None: result['eval_result_2'] = model.evaluate(**eval_inputs) result['weights_2'] = model.get_weights() return result def compare_results(results_with_ds, results_without_ds, distribution, testcase, partial_last_batch=None): """Compares results of model compiled with/without distribution strategy.""" if partial_last_batch == 'train_and_eval': # We relax the tolerence a lot in the partial last batch case as # 1. the examples in uneven batches may have different weights when # applying the gradients in the distributed case. # 2. TF Keras and TF Keras DS have different ways to handle the case when # training with epochs > 1 with numpy inputs. In TF Keras, every epoch # may have a partial batch. While in TF Keras DS, as we convert # numpy inputs into dataset, it will do a repeat() first and calculate # steps_per_epoch, so it will at most have one partial batch. This # makes the 1-CPU result even different. 
default_tolerance = 1e-3 relaxed_tolerance = 1e-3 else: default_tolerance = 1e-5 relaxed_tolerance = 1e-4 def _get_compare_result_tolerance(key): """Returns tolerance to compare results.""" # TODO(b/119257215): For MirroredStrategy, weights are not exactly the same, # so use larger tolerance for now. Predict should be related to weights. if (isinstance(distribution, (mirrored_strategy.MirroredStrategy, distribute_lib._DefaultDistributionStrategy)) and # pylint: disable=protected-access key.startswith(('weights_1', 'weights_2', 'predict_result'))): return relaxed_tolerance return default_tolerance for key in sorted(results_with_ds.keys()): if (key.startswith('training_history') and isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and distribution.extended.steps_per_run > 1): # TODO(b/119894254): Enable this test for all cases once the # underlying bug is fixed. continue tolerance = _get_compare_result_tolerance(key) # We don't compare the loss as loss is currently not computed as metric # in Keras, the loss value is inaccurate for last partial batch due to # more weights for the last batch samples. if partial_last_batch is not None: if key.startswith('eval_result'): results_with_ds[key] = results_with_ds[key][1:] results_without_ds[key] = results_without_ds[key][1:] if key.startswith('training_history'): results_with_ds[key]['val_loss'] = 0 results_without_ds[key]['val_loss'] = 0 testcase.assertAllClose( results_with_ds[key], results_without_ds[key], atol=tolerance, rtol=tolerance, msg='Fail to assert {}.'.format(key)) def should_skip_tpu_with_eager(distribution): return (context.executing_eagerly() and isinstance(distribution, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1))) class LearningRateBatchScheduler(keras.callbacks.Callback): """Scheduler that dynamically sets the learning rate of model.""" def __init__(self, update_freq=None): self._update_freq = update_freq def on_batch_begin(self, batch, logs=None): if self._update_freq and batch % self._update_freq != 0: return # To avoid divergence, limit the value range. lr = 0.001 * (batch % 10) keras.backend.set_value(self.model.optimizer.lr, lr) class TestDistributionStrategyCorrectnessBase(test.TestCase, parameterized.TestCase): """Model agnostic testing infra to test correctness of Keras models.""" def set_up_test_config(self, use_numpy=False, use_validation_data=False, with_batch_norm=False): self.use_numpy = use_numpy self.use_validation_data = use_validation_data self.with_batch_norm = with_batch_norm keras.backend.set_image_data_format('channels_last') np.random.seed(_RANDOM_SEED) random_seed.set_random_seed(_RANDOM_SEED) def get_data(self): num_samples = 10000 x_train = np.random.randint(0, 2, num_samples) x_train = np.reshape(x_train, (num_samples, 1)) y_train = x_train return (x_train.astype('float32'), y_train.astype('float32'), None) def get_data_with_partial_last_batch(self): raise NotImplementedError def get_data_with_partial_last_batch_eval(self): raise NotImplementedError def get_input_for_correctness_test(self, **kwargs): """Generates inputs that are dictionaries. We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. 
Arguments: **kwargs: key word arguments about how to create the input dictionaries Returns: Three dictionaries representing the input for fit(), evalutate() and predict() """ return get_correctness_test_inputs(**kwargs) def get_model(self, distribution=None, run_distributed=None, input_shapes=None): raise NotImplementedError def skip_unsupported_test_configuration(self, distribution, run_distributed): if should_skip_tpu_with_eager(distribution) and run_distributed: self.skipTest( 'TPUStrategy does not support eager mode with run_distributed.') return def run_correctness_test(self, distribution, use_numpy, use_validation_data, run_distributed=None, with_batch_norm=False, is_stateful_model=False, partial_last_batch=None, training_epochs=2): with self.cached_session(): self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm) self.skip_unsupported_test_configuration(distribution, run_distributed) if partial_last_batch == 'eval': x_train, y_train, x_eval, y_eval, x_predict = ( self.get_data_with_partial_last_batch_eval()) elif partial_last_batch == 'train_and_eval': x_train, y_train, x_eval, y_eval, x_predict = ( self.get_data_with_partial_last_batch()) else: x_train, y_train, x_predict = self.get_data() x_eval = x_train y_eval = y_train # The model is built once and the initial weights are saved. # This is used to initialize the model for both the distribution and # non-distribution run. model = self.get_model( run_distributed=run_distributed, input_shapes=get_shapes(x_train)) initial_weights = model.get_weights() ds_input_fn = functools.partial( self.get_input_for_correctness_test, use_numpy=use_numpy, use_validation_data=use_validation_data, with_distribution=distribution, x_train=x_train, y_train=y_train, x_eval=x_eval, y_eval=y_eval, x_predict=x_predict, training_epochs=training_epochs) nods_input_fn = functools.partial( self.get_input_for_correctness_test, use_numpy=use_numpy, use_validation_data=use_validation_data, with_distribution=None, x_train=x_train, y_train=y_train, x_eval=x_eval, y_eval=y_eval, x_predict=x_predict, training_epochs=training_epochs) results_with_ds = fit_eval_and_predict( initial_weights, input_fn=ds_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=distribution, is_stateful_model=is_stateful_model) results_without_ds = fit_eval_and_predict( initial_weights, input_fn=nods_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=None, is_stateful_model=is_stateful_model) # First, special case, for multi-replica distributed training, batch # norm is not aggregated globally. So it is expected to have different # weights. if (self.with_batch_norm and distribution.num_replicas_in_sync > 1): with self.assertRaises(AssertionError): compare_results( results_with_ds, results_without_ds, distribution, testcase=self, partial_last_batch=partial_last_batch) else: compare_results( results_with_ds, results_without_ds, distribution, testcase=self, partial_last_batch=partial_last_batch) def get_input_for_dynamic_lr_test(self, **kwargs): """Generates inputs that are dictionaries. We only provide a default implementation of this method here. If you need more customized way of providing input to your model, overwrite this method. 
Arguments: **kwargs: key word arguments about how to create the input dictionaries Returns: Three dictionaries representing the input for fit(), evalutate() and predict() """ training_input = kwargs return training_input, None, None def run_dynamic_lr_test(self, distribution, run_distributed=None): with self.cached_session(): self.set_up_test_config() self.skip_unsupported_test_configuration(distribution, run_distributed) x_train, y_train, _ = self.get_data() model = self.get_model( run_distributed=run_distributed, input_shapes=get_shapes(x_train)) initial_weights = model.get_weights() update_freq = None if (isinstance(distribution, tpu_strategy.TPUStrategyV1) and distribution.extended.steps_per_run > 1): # For TPUStrategy with steps_per_run > 1, the callback is not invoked # every step. So, to compare the CPU/TPU, we let the CPU to behave the # same as TPU. update_freq = distribution.extended.steps_per_run training_epochs = 2 global_batch_size = 64 ds_batch_size = get_batch_size(global_batch_size, distribution) nods_batch_size = get_batch_size(global_batch_size, None) ds_input_fn = functools.partial( self.get_input_for_dynamic_lr_test, x=x_train, y=y_train, batch_size=ds_batch_size, shuffle=False, epochs=training_epochs, callbacks=[LearningRateBatchScheduler(update_freq)], validation_data=(x_train, y_train)) nods_input_fn = functools.partial( self.get_input_for_dynamic_lr_test, x=x_train, y=y_train, batch_size=nods_batch_size, shuffle=False, epochs=training_epochs, callbacks=[LearningRateBatchScheduler(update_freq)], validation_data=(x_train, y_train)) results_with_ds = fit_eval_and_predict( initial_weights, input_fn=ds_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=distribution) results_without_ds = fit_eval_and_predict( initial_weights, input_fn=nods_input_fn, model_fn=self.get_model, run_distributed=run_distributed, distribution=None) compare_results( results_with_ds, results_without_ds, distribution, testcase=self) class TestDistributionStrategyEmbeddingModelCorrectnessBase( TestDistributionStrategyCorrectnessBase): """Base class to test correctness of Keras models with embedding layers.""" def get_data(self, count=(_GLOBAL_BATCH_SIZE * _EVAL_STEPS), min_words=5, max_words=10, max_word_id=19, num_classes=2): distribution = [] for _ in range(num_classes): dist = np.abs(np.random.randn(max_word_id)) dist /= np.sum(dist) distribution.append(dist) features = [] labels = [] for _ in range(count): label = np.random.randint(0, num_classes, size=1)[0] num_words = np.random.randint(min_words, max_words, size=1)[0] word_ids = np.random.choice( max_word_id, size=num_words, replace=True, p=distribution[label]) word_ids = word_ids labels.append(label) features.append(word_ids) features = keras.preprocessing.sequence.pad_sequences( features, maxlen=max_words) x_train = np.asarray(features, dtype=np.float32) y_train = np.asarray(labels, dtype=np.int32).reshape((count, 1)) x_predict = x_train[:_GLOBAL_BATCH_SIZE] return x_train, y_train, x_predict if __name__ == '__main__': test.main()
py
1a446e325b859dc07427753e10933547d083ca9d
from pathlib import Path
from typing import Optional
import zlib


class DeceptionEnabler(object):
    """Make sure to put `*.bf` in your .gitignore!"""

    def __init__(self, binary_extension: str = "gif", bf_extension: str = "bf"):
        self.binary_ext = ".%s" % binary_extension
        self.bf_ext = ".%s" % bf_extension

    def compress_program(self, program: bytes) -> bytes:
        return zlib.compress(program, level=9)

    def decompress_program(self, program: bytes) -> bytes:
        return zlib.decompress(program)

    def load_from_file(self, fname: str) -> bytes:
        # Read a compressed blob and return the original program bytes.
        with open(fname, "rb") as f:
            compressed_data = f.read()
        uncompressed_data = self.decompress_program(compressed_data)
        return uncompressed_data

    def save_program_to_file(self, program: bytes, out_fname: str, overwrite: bool = True) -> None:
        compressed_data = self.compress_program(program)
        open_flags = "wb" if overwrite is True else "xb"
        with open(out_fname, open_flags) as f:
            f.write(compressed_data)

    def decompress_from_file(self, binary_fname: str, bf_fname: Optional[str] = None) -> None:
        out_fname = bf_fname or str(Path(binary_fname).with_suffix(self.bf_ext))
        uncompressed_data = self.load_from_file(binary_fname)
        with open(out_fname, "wb") as f:
            f.write(uncompressed_data)

    def compress_from_file(self, bf_fname: str, binary_fname: Optional[str] = None) -> None:
        with open(bf_fname, "rb") as f:
            uncompressed_data = f.read()
        out_fname = binary_fname or str(Path(bf_fname).with_suffix(self.binary_ext))
        self.save_program_to_file(uncompressed_data, out_fname)
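A short round-trip sketch for the class above; the file names are illustrative:

# Compress hello.bf into a blob named hello.gif, then recover the source.
enabler = DeceptionEnabler()
enabler.compress_from_file("hello.bf", "hello.gif")
program = enabler.load_from_file("hello.gif")  # decompressed program bytes
enabler.decompress_from_file("hello.gif")      # writes hello.bf back to disk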
py
1a446e661d680f646fe24d36829a159b4fee1bd7
# signal definitions for request_profiler
from django.dispatch import Signal

# Signal sent after profile data has been captured, but before it is
# saved. This signal can be used to cancel the profiling by calling the
# instance.cancel() method, which sets an internal property telling the
# instance not to save itself when capture() is called.
request_profile_complete = Signal(providing_args=['request', 'response', 'instance'])
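A minimal receiver sketch for the signal defined above, using the cancellation hook described in the comment; the path check is only an example:

from django.dispatch import receiver

@receiver(request_profile_complete)
def discard_admin_profiles(sender, request, response, instance, **kwargs):
    # Tell the profiler not to save this record when capture() runs.
    if request.path.startswith('/admin/'):
        instance.cancel()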
py
1a446f4947ba0207616f85fb76df514067cf366a
#!/usr/bin/python """ Description: Tool for performing benchmarking of programs Copyright (c) 2015, Lucian Radu Teodorescu """ import os, sys, shutil, time, glob, subprocess, resource, struct, numpy from collections import defaultdict import config testsDir = 'tests' resultsDir = 'results' tmpDir = resultsDir + '/tmp' tests = [] memDiv = 1024.0*1024.0 if config.dumpMemAsMB else 1024.0 memUnit = 'MB' if config.dumpMemAsMB else 'KB' def getFileContents(filename): with open(filename) as f: return f.read().rstrip() def measureCommand(command, fout): resReadPipe, resWritePipe = os.pipe() pid = os.fork() if pid == 0: isTimeout = False try: # Start executing the command # print "Running: %s" % command command = command.split() p = subprocess.Popen(command, stderr=subprocess.STDOUT, stdout=fout) # Wait until the command is finished, or we reach the timeout timeout = config.testTimeout while p.poll() is None and timeout > 0: time.sleep(1) timeout -= 1 if not timeout > 0: p.terminate() isTimeout = True except Exception as e: print 'RUN ERROR: %s' % str(e) isTimeout = True # Send back the results and quit rusage = resource.getrusage(resource.RUSAGE_CHILDREN) ttime = rusage.ru_utime + rusage.ru_stime os.write(resWritePipe, struct.pack('?', isTimeout)) os.write(resWritePipe, struct.pack('f', ttime)) os.write(resWritePipe, struct.pack('L', rusage.ru_maxrss)) sys.exit(0) # Read the results from the forked process isTimeout = struct.unpack('?', os.read(resReadPipe, struct.calcsize('?')))[0] ttime = struct.unpack('f', os.read(resReadPipe, struct.calcsize('f')))[0] maxrss = struct.unpack('L', os.read(resReadPipe, struct.calcsize('L')))[0] return (isTimeout, ttime, maxrss) class Test: def __init__(self, dir): self.name = os.path.basename(dir) self.dir = dir self.programs = [] self.runArgs = [] self.results = defaultdict(lambda: []) def __repr__(self): return "Test(%s, programs=%s, args=%s)" % (self.name, self.programs, self.runArgs) def compile(self): print " %-20s\t" % self.name, os.chdir(self.dir) logFilename = '%s/comp_%s.log' % (tmpDir, self.name) with open(logFilename, 'w') as f: if config.cleanBeforeBuild: res = subprocess.call(['make', 'clean'], stderr=subprocess.STDOUT, stdout=f) if res != 0: raise Exception("Cannot execute 'make clean' on programs; check the log file: %s" % logFilename) res = subprocess.call(['make'], stderr=subprocess.STDOUT, stdout=f) if res != 0: raise Exception("Cannot compile the programs; check the log file: %s" % logFilename) # Gather programs; results a list of (name, executable) res = getFileContents('programs.in') progs = res.rstrip().split('\n') progs = filter(lambda p: not p.startswith('#'), progs) self.programs = [] for p in progs: # Check if the line is of the form <name>:<executable> colon = p.find(':') if colon >= 0: self.programs.append( (p[0:colon].strip(), p[colon+1:].strip()) ) else: name = p; if p.startswith('./'): name = p[2:] name = name.replace('/', '_') self.programs.append( (name, p) ) # Gather running arguments res = getFileContents('args.in') self.runArgs = res.rstrip().split('\n') self.runArgs = filter(lambda p: not p.startswith('#'), self.runArgs) if len(self.programs) > 5: print '%d programs' % len(self.programs), else: print [p[0] for p in self.programs], print " / ", if len(self.runArgs) > 7: print '%d args sets' % len(self.runArgs), else: print self.runArgs def run(self): # Run the programs resLogFilename = '%s/results_%s.log' % (resultsDir, self.name) with open(resLogFilename, 'w') as flog: for prog in self.programs: for args in self.runArgs: for r 
in range(0, config.numRepeats): progName = prog[0] progExe = prog[1] print " %s: %s %s (%d)\t\t" % (self.name, progName, args, r+1), print >>flog, "\n%s: %s %s (%d)" % (self.name, progName, args, r+1) print >>flog, " > %s %s" % (progExe, args) sys.stdout.flush() flog.flush() logFilename = '%s/%s.%s %s.%d.run.log' % (tmpDir, self.name, progName, args, r+1) with open(logFilename, 'w') as fout: os.chdir(self.dir) isTimeout, time, mem = measureCommand("%s %s" % (progExe, args), fout) if isTimeout: print "TIMEOUT - time: %f, mem: %f %s" % (time, mem/memDiv, memUnit) print >>flog, "TIMEOUT - time: %f, mem: %f %s" % (time, mem/memDiv, memUnit) time = config.testTimeout + 1 else: print "time: %f, mem: %f %s" % (time, mem/memDiv, memUnit) print >>flog, "time: %f, mem: %f %s" % (time, mem/memDiv, memUnit) sys.stdout.flush() flog.flush(); self.results[(progName, args)].append((time, mem)) # Average and print the results csvFilename = '%s/results_%s.csv' % (resultsDir, self.name) print "" print "Results for '%s'" % self.name print >>flog, "" print >>flog, "Test results:" with open(csvFilename, 'w') as fout: print '# Program name, args, time (s), time deviation (s), memory (%s), memory deviation (%s)' % (memUnit, memUnit) print >>fout, '# Program name, args, time (s), time deviation (s), memory (%s), memory deviation (%s)' % (memUnit, memUnit) for k in sorted(self.results): val = self.results[k] print >>flog, "%s %s: %s" % (k[0], k[1], val), if config.ignoreFirstRun: val.pop(0) times, mems = zip(*val) timeAvg = numpy.mean(times) timeStd = numpy.std(times) memAvg = numpy.mean(mems) / memDiv memStd = numpy.std(mems) / memDiv print '%s, \t%s,\t %f, \t%f, \t%f, \t%f' % (k[0], k[1], timeAvg, timeStd, memAvg, memStd) print >>fout, '%s, \t%s,\t %f, \t%f, \t%f, \t%f' % (k[0], k[1], timeAvg, timeStd, memAvg, memStd) print >>flog, '\t=> (%f, %f)-(%f, %f)' % (timeAvg, timeStd, memAvg, memStd) print "" def ensureCleanDir(dir): if os.path.isdir(dir): shutil.rmtree(dir) os.makedirs(dir) def checkDirectories(): thisDir = os.path.dirname(os.path.realpath(__file__)) global testsDir global resultsDir global tmpDir testsDir = thisDir + '/' + testsDir resultsDir = thisDir + '/' + resultsDir tmpDir = thisDir + '/' + tmpDir if not os.path.isdir(testsDir): print 'Cannot find tests directory: %s' % testsDir sys.exit(1) ensureCleanDir(resultsDir) ensureCleanDir(tmpDir) def gatherTests(): if os.path.isfile(testsDir+'/programs.in'): # Don't consider the subdirs; all the data is in the tests folder tests.append(Test(testsDir)) else: for d in glob.glob(testsDir+'/*'): if os.path.isdir(d): if os.path.splitext(os.path.basename(d))[0].startswith('.'): continue tests.append(Test(d)) print ' available tests: %s' % [t.name for t in tests] def main(): print 'bench_tool, copyright (c) 2015 Lucian Radu Teodorescu' oldDir = os.getcwd() try: print 'Initializing...' checkDirectories() gatherTests() print 'Compiling programs...' for t in tests: t.compile() print 'Performing the benchmark...' for t in tests: t.run() except KeyboardInterrupt: print 'INTERRUPTED' except Exception as e: print 'ERROR: %s' % str(e) os.chdir(oldDir) print '' if __name__ == "__main__": main()
py
1a446fc9489260571c1afd9a050c49c55fa54d56
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 Raphael Michel and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # import logging from django import forms from django.conf import settings from django.utils.translation import gettext as _ from oauth2_provider.exceptions import FatalClientError, OAuthToolkitError from oauth2_provider.forms import AllowForm from oauth2_provider.settings import oauth2_settings from oauth2_provider.views import ( AuthorizationView as BaseAuthorizationView, RevokeTokenView as BaseRevokeTokenView, TokenView as BaseTokenView, ) from pretix.api.models import OAuthApplication from pretix.base.models import Organizer logger = logging.getLogger(__name__) class OAuthAllowForm(AllowForm): organizers = forms.ModelMultipleChoiceField( queryset=Organizer.objects.none(), widget=forms.CheckboxSelectMultiple ) def __init__(self, *args, **kwargs): user = kwargs.pop('user') scope = kwargs.pop('scope') super().__init__(*args, **kwargs) self.fields['organizers'].queryset = Organizer.objects.filter( pk__in=user.teams.values_list('organizer', flat=True)) if scope == 'profile': del self.fields['organizers'] class AuthorizationView(BaseAuthorizationView): template_name = "pretixcontrol/auth/oauth_authorization.html" form_class = OAuthAllowForm def get_form_kwargs(self): kwargs = super().get_form_kwargs() kwargs['user'] = self.request.user kwargs['scope'] = self.request.GET.get('scope') return kwargs def get_context_data(self, **kwargs): ctx = super().get_context_data(**kwargs) ctx['settings'] = settings return ctx def validate_authorization_request(self, request): require_approval = request.GET.get("approval_prompt", oauth2_settings.REQUEST_APPROVAL_PROMPT) if require_approval != 'force' and request.GET.get('scope') != 'profile': raise FatalClientError('Combnination of require_approval and scope values not allowed.') return super().validate_authorization_request(request) def create_authorization_response(self, request, scopes, credentials, allow, organizers=None): credentials["organizers"] = organizers or [] return super().create_authorization_response(request, scopes, credentials, allow) def form_valid(self, form): client_id = form.cleaned_data["client_id"] application = OAuthApplication.objects.get(client_id=client_id) credentials = { "client_id": form.cleaned_data.get("client_id"), "redirect_uri": form.cleaned_data.get("redirect_uri"), "response_type": form.cleaned_data.get("response_type", None), "state": form.cleaned_data.get("state", 
None), } scopes = form.cleaned_data.get("scope") allow = form.cleaned_data.get("allow") try: uri, headers, body, status = self.create_authorization_response( request=self.request, scopes=scopes, credentials=credentials, allow=allow, organizers=form.cleaned_data.get("organizers") ) except OAuthToolkitError as error: return self.error_response(error, application) self.success_url = uri logger.debug("Success url for the request: {0}".format(self.success_url)) msgs = [ _('The application "{application_name}" has been authorized to access your account.').format( application_name=application.name ) ] self.request.user.send_security_notice(msgs) self.request.user.log_action('pretix.user.oauth.authorized', user=self.request.user, data={ 'application_id': application.pk, 'application_name': application.name, }) return self.redirect(self.success_url, application) class TokenView(BaseTokenView): pass class RevokeTokenView(BaseRevokeTokenView): pass
py
1a4470a4f852722d9f2504bdbcbf561a59fe7dc3
# Adapted from Sebastian Noack's python-goto, originally licensed under the # Unlicence and re-licenced under Apache 2.0 as part of Pomagma. import pytest from goto import goto, label, with_goto CODE = '''\ i = 0 result = [] label.start if i == 10: goto.end result.append(i) i += 1 goto.start label.end ''' EXPECTED = list(range(10)) def test_range_as_code(): ns = {} exec(with_goto(compile(CODE, '', 'exec')), ns) assert ns['result'] == EXPECTED def test_range_as_function(): ns = {} exec('\n'.join( ['def func():'] + ['\t' + x for x in CODE.splitlines() + ['return result']] ), ns) assert with_goto(ns['func'])() == EXPECTED def test_jump_out_of_loop(): @with_goto def func(): for i in range(10): goto.end label.end return i assert func() == 0 def test_jump_into_loop(): def func(): for i in range(10): label.loop goto.loop pytest.raises(SyntaxError, with_goto, func) def test_jump_out_of_nested_4_loops(): @with_goto def func(): for i in range(2): for j in range(2): for k in range(2): for m in range(2): goto.end label.end return (i, j, k, m) assert func() == (0, 0, 0, 0) def test_jump_out_of_nested_5_loops(): def func(): for i in range(2): for j in range(2): for k in range(2): for m in range(2): for n in range(2): goto.end label.end return (i, j, k, m, n) pytest.raises(SyntaxError, with_goto, func) def test_jump_across_loops(): def func(): for i in range(10): goto.other_loop for i in range(10): label.other_loop pytest.raises(SyntaxError, with_goto, func) def test_jump_out_of_try_block(): @with_goto def func(): try: rv = None goto.end except: rv = 'except' finally: rv = 'finally' label.end return rv assert func() is None def test_jump_into_try_block(): def func(): try: label.block except: pass goto.block pytest.raises(SyntaxError, with_goto, func) def test_jump_to_unkown_label(): def func(): goto.unknown pytest.raises(SyntaxError, with_goto, func) def test_function_is_copy(): def func(): pass func.foo = 'bar' newfunc = with_goto(func) assert newfunc is not func assert newfunc.foo == 'bar'
py
1a44718a26355ab5a22c29a7e9a20cf8fdd3f390
import numpy as np import pandas as pd import tensorflow as tf import math from sklearn.cluster import KMeans import Loaddata from numpy import random import time from datetime import date import matplotlib.pyplot as plt import os from pandas import DataFrame, concat import multiprocessing as mp class LSTM_double: # 定义常量 def __init__(self, data): self.rnn_unit = 300 self.input_size = 100 self.output_size = 1 self.lr = 0.00006 self.time_step = 1 self.batch_size = 1 self.data = self.series_to_supervised(data, 100) self.train_begin = 0 self.train_end = len(self.data) self.test_begin = len(self.data)-1 self.weights = { 'in': tf.Variable(tf.random_normal([self.input_size, self.rnn_unit])), 'out': tf.Variable(tf.random_normal([self.rnn_unit, self.output_size])) } self.biases = { 'in': tf.Variable(tf.constant(0.1, shape=[self.rnn_unit, ])), 'out': tf.Variable(tf.constant(0.1, shape=[1, ])) } # 定义分割函数 def series_to_supervised(self, data, n_in=1, n_out=1, dropnan=True): n_vars = 1 if type(data) is list else data.shape[1] df = DataFrame(data) cols, names = list(), list() for i in range(n_in, 0, -1): cols.append(df.shift(i)) names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)] for i in range(0, n_out): cols.append(df.shift(-i)) if i == 0: names += [('var%d(t)' % (j+1)) for j in range(n_vars)] else: names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)] agg = concat(cols, axis=1) agg.columns = names if dropnan: agg.dropna(inplace=True) return agg.values # 获取训练集 def get_train_data(self): batch_index = [] data_train = self.data[self.train_begin:self.train_end] normalized_train_data = data_train/1e8 train_x, train_y = [], [] # 训练集 for i in range(len(normalized_train_data)-self.time_step): if i % self.batch_size == 0: batch_index.append(i) x = normalized_train_data[i:i+self.time_step, :100] y = normalized_train_data[i:i+self.time_step, 100:] train_x.append(x.tolist()) train_y.append(y.tolist()) batch_index.append((len(normalized_train_data)-self.time_step)) return batch_index, train_x, train_y # 获取测试集 def get_test_data(self): data_test = self.data[self.test_begin:] normalized_test_data = data_test/1e8 size = (len(normalized_test_data) + self.time_step)//self.time_step # 有size个sample test_x, test_y = [], [] for i in range(size-1): x = normalized_test_data[i * self.time_step:(i+1)*self.time_step, :100] y = normalized_test_data[i * self.time_step:(i+1)*self.time_step, 100] test_x.append(x.tolist()) test_y.extend(y) test_x.append( (normalized_test_data[(i+1)*self.time_step:, :100]).tolist()) test_y.extend( (normalized_test_data[(i+1)*self.time_step:, 100]).tolist()) return test_x, test_y # ——————————————————定义神经网络变量—————————————————— def lstm(self, X): self.batch_size = tf.shape(X)[0] self.time_step = tf.shape(X)[1] w_in = self.weights['in'] b_in = self.biases['in'] # 将tensor转成2维进行计算,计算后的结果作为隐藏层的输入 input = tf.reshape(X, [-1, self.input_size]) input_rnn = tf.matmul(input, w_in)+b_in # 将tensor转成3维,作为lstm cell的输入 input_rnn = tf.reshape(input_rnn, [-1, self.time_step, self.rnn_unit]) cell = tf.nn.rnn_cell.LSTMCell(self.rnn_unit) init_state = cell.zero_state(self.batch_size, dtype=tf.float32) # output_rnn是记录lstm每个输出节点的结果,final_states是最后一个cell的结果 output_rnn, final_states = tf.nn.dynamic_rnn( cell, input_rnn, initial_state=init_state, dtype=tf.float32) output = tf.reshape(output_rnn, [-1, self.rnn_unit]) # 作为输出层的输入 w_out = self.weights['out'] b_out = self.biases['out'] pred = tf.matmul(output, w_out)+b_out pred = tf.reshape(pred, [-1, self.output_size]) return pred, final_states # 
——————————————————训练模型—————————————————— def train_lstm(self, num_epochs=40, numb_sub=1,numb_class=1,continue_train=False,class_people='purchase'): X = tf.placeholder(tf.float32, shape=[None, 1, 100]) Y = tf.placeholder(tf.float32, shape=[None, 1, 1]) batch_index, train_x, train_y = self.get_train_data() with tf.variable_scope("sec_lstm"): pred, _ = self.lstm(X) # 损失函数 loss = tf.reduce_mean( tf.square(tf.reshape(pred, [-1])-tf.reshape(Y, [-1]))) train_op = tf.train.AdamOptimizer(self.lr).minimize(loss) saver = tf.train.Saver(tf.global_variables(), max_to_keep=15) if continue_train==True: module_file = tf.train.latest_checkpoint('model_save_'+class_people+'_'+ str(numb_sub)+'_'+str(numb_class)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if continue_train==True: saver.restore(sess, module_file) # 重复训练 for i in range(num_epochs): for step in range(len(batch_index)-1): _, loss_ = sess.run([train_op, loss], feed_dict={ X: train_x[batch_index[step]:batch_index[step+1]], Y: train_y[batch_index[step]:batch_index[step+1]]}) print(i+1, loss_) if ((i+1) % num_epochs) == 0: print("保存模型:", saver.save(sess, 'model_save_'+class_people+'_' + str(numb_sub)+'_'+str(numb_class)+'/modle.ckpt', global_step=i)) # ————————————————预测模型———————————————————— def prediction(self, numb_sub=1,numb_class=1,class_people='purchase'): self.time_step = 1 self.input_size = 100 self.output_size = 1 X = tf.placeholder(tf.float32, shape=[ None, self.time_step, self.input_size]) Y = tf.placeholder(tf.float32, shape=[ None, self.time_step, self.output_size]) test_x, test_y = self.get_test_data() with tf.variable_scope("sec_lstm", reuse=tf.AUTO_REUSE): pred, _ = self.lstm(X) saver = tf.train.Saver(tf.global_variables()) with tf.Session() as sess: # 参数恢复 module_file = tf.train.latest_checkpoint( 'model_save_'+class_people+'_'+str(numb_sub)+'_'+str(numb_class)) saver.restore(sess, module_file) test_x = test_x[:1] test_x = [a[0] for a in test_x] test_x = np.array(test_x) test_x[:, :99] = test_x[:, 1:] test_x[:, 99:] = test_y[-1] test_predict = [] for step in range(30): prob = sess.run(pred, feed_dict={X: [test_x]}) predict = prob.reshape(-1) test_predict.extend(prob) test_x[:, :99] = test_x[:, 1:] test_x[:, 99:] = prob[-1] test_predict = np.array(test_predict) test_predict = test_predict[:, 0] test_predict = test_predict.flatten() test_predict = np.array(test_predict)*1e8 print(test_predict) return test_predict class k_mean(object): def __init__(self, data): self.x_train = data def k_mean_divide(self, cluster_num): kmeans = KMeans(n_clusters=cluster_num, random_state=0).fit(self.x_train) divide_labels = kmeans.labels_ divide_class = {} for i in range(cluster_num): divide_answer = (divide_labels == i) divide = [] for j in range(len(divide_labels)): if divide_answer[j] == True: divide.append(j) divide_class['cluster'+str(i)] = np.array(divide)+1 return divide_class class genetic(object): def getEncoding(self, popSize, chromLength): # 生成种群 pop = random.randint(0, 2, size=(popSize, chromLength)) return pop def binary2decimal(self, pop, chromLength_type, chromLength): row = pop.shape[0] chromLength_length = len(chromLength_type) - 1 tempfinal = np.zeros((row, chromLength_length)) position_sum = np.cumsum(chromLength_type) for i in range(row): for j in range(chromLength_length): t = 0 for k in range(position_sum[j], position_sum[j+1]): t += pop[i, k]*(math.pow(2, k - position_sum[j])) tempfinal[i, j] = t tempfinal[:, 0] = tempfinal[:, 0]+1 tempfinal[:, 1:] = tempfinal[:, 1:]/(math.pow(2, 8)-1)*5 return tempfinal 
def multiprocess_fitness_purchase(self, j):# 并行计算 multiple_time = np.hstack((self.tempfinal[j, 1], np.tile( self.tempfinal[j, 2], 7), np.tile(self.tempfinal[j, 3], 12))) # 拼接倍数 for k in range(4, self.tempfinal.shape[1]): multiple_time = np.hstack((multiple_time, self.tempfinal[j, k])) user_profile_onehot = self.user_profile_onehot * multiple_time # 将部分向量的权重扩大 model_kmean = k_mean(user_profile_onehot) # 聚类 divide_class = model_kmean.k_mean_divide(int(self.tempfinal[j, 0])) user_balance = Loaddata.UserBalance() purchase_predict_class = [] purchase_test_class = [] for i in range(len(divide_class)): # 将这几种分类分别带入网络识别 print('第'+str(j+1)+'个种群 第'+str(i+1)+'个类') user_balance.CalculateDayPurchaseList( divide_class['cluster'+str(i)]) user_balance.CalculateDayRedeemList( divide_class['cluster'+str(i)]) purchase_train, redeem_train = user_balance.GetdataUsedInPredict() purchase_test, redeem_test = user_balance.GetTestData() purchase_model = LSTM_double(purchase_train.reshape((-1, 1))) purchase_model.train_lstm(numb_sub=j+1,numb_class=i+1) purchase_predict = purchase_model.prediction(numb_sub=j+1,numb_class=i+1) tf.reset_default_graph() plt.plot(purchase_predict, 'b') plt.plot(purchase_test, 'g') if not os.path.exists('out_lstm_double/'): os.makedirs('out_lstm_double/') plt.savefig('out_lstm_double/purchase_the_{}_times_the_{}_gene_the_{}_class.png'.format( str(self.times_calc), str(j+1), str(i+1))) plt.close() purchase_predict_class.append(purchase_predict) purchase_test_class.append(purchase_test) purchase_loss_value = np.mean(abs(np.array(purchase_predict_class).sum( axis=0) - np.array(purchase_test_class).sum(axis=0))/(np.array(purchase_test_class).sum(axis=0))) return 1/purchase_loss_value def fitness_purchase(self, tempfinal, user_profile_onehot, times_calc): # 适应度 self.user_profile_onehot = user_profile_onehot self.tempfinal = tempfinal self.times_calc = times_calc pool = mp.Pool(processes=tempfinal.shape[0]) purchase_loss_value = pool.map( self.multiprocess_fitness_purchase, range(tempfinal.shape[0])) pool.close() pool.join() return np.squeeze(purchase_loss_value) def fitness_predict_purchase(self,length_best, tempfinal, user_profile_onehot, user_balance): multiple_time = np.hstack((tempfinal[0, 1], np.tile( tempfinal[0, 2], 7), np.tile(tempfinal[0, 3], 12))) # 拼接倍数 for k in range(4, tempfinal.shape[1]): multiple_time = np.hstack((multiple_time, tempfinal[0, k])) user_profile_onehot = user_profile_onehot * multiple_time # 将部分向量的权重扩大 model_kmean = k_mean(user_profile_onehot) # 聚类 divide_class = model_kmean.k_mean_divide(int(tempfinal[0, 0])) purchase_predict_class = [] for i in range(len(divide_class)): # 将这几种分类分别带入网络识别 user_balance.CalculateDayPurchaseList( divide_class['cluster'+str(i)]) user_balance.CalculateDayRedeemList(divide_class['cluster'+str(i)]) purchase_train, redeem_train = user_balance.GetdataAll() purchase_model = LSTM_double(purchase_train.reshape((-1, 1))) purchase_model.train_lstm(num_epochs = 10,numb_sub = length_best,numb_class=i+1,continue_train=True) purchase_predict = purchase_model.prediction(numb_sub=length_best,numb_class=i+1) tf.reset_default_graph() purchase_predict_class.append(purchase_predict) purchase_predict_return = np.array(purchase_predict_class).sum(axis=0) return purchase_predict_return def multiprocess_fitness_redeem(self, j): multiple_time = np.hstack((self.tempfinal[j, 1], np.tile( self.tempfinal[j, 2], 7), np.tile(self.tempfinal[j, 3], 12))) # 拼接倍数 for k in range(4, self.tempfinal.shape[1]): multiple_time = np.hstack((multiple_time, self.tempfinal[j, k])) 
user_profile_onehot = self.user_profile_onehot * multiple_time # 将部分向量的权重扩大 model_kmean = k_mean(user_profile_onehot) # 聚类 divide_class = model_kmean.k_mean_divide(int(self.tempfinal[j, 0])) user_balance = Loaddata.UserBalance() redeem_predict_class = [] redeem_test_class = [] for i in range(len(divide_class)): # 将这几种分类分别带入网络识别 print('第'+str(j+1)+'个种群 第'+str(i+1)+'个类') user_balance.CalculateDayPurchaseList( divide_class['cluster'+str(i)]) # 主要时间花在这里!!!! user_balance.CalculateDayRedeemList( divide_class['cluster'+str(i)]) purchase_train, redeem_train = user_balance.GetdataUsedInPredict() purchase_test, redeem_test = user_balance.GetTestData() redeem_model = LSTM_double(redeem_train.reshape((-1, 1))) redeem_model.lr = 0.0001 redeem_model.train_lstm(num_epochs=60, numb_sub=j+1,numb_class=i+1,class_people='redeem') redeem_predict = redeem_model.prediction(numb_sub=j+1,numb_class=i+1,class_people='redeem') tf.reset_default_graph() plt.plot(redeem_predict, 'b') plt.plot(redeem_test, 'g') plt.savefig('out_lstm_double/redeem_the_{}_times_the_{}_gene_the_{}_class.png'.format( str(self.times_calc), str(j+1), str(i+1))) plt.close() redeem_predict_class.append(redeem_predict) redeem_test_class.append(redeem_test) redeem_loss_value = np.mean(abs(np.array(redeem_predict_class).sum( axis=0) - np.array(redeem_test_class).sum(axis=0))/(np.array(redeem_test_class).sum(axis=0))) return 1/redeem_loss_value def fitness_redeem(self, tempfinal, user_profile_onehot, times_calc): # 适应度 self.user_profile_onehot = user_profile_onehot self.tempfinal = tempfinal self.times_calc = times_calc pool = mp.Pool(processes=tempfinal.shape[0]) redeem_loss_value = pool.map( self.multiprocess_fitness_redeem, range(tempfinal.shape[0])) pool.close() pool.join() return np.squeeze(redeem_loss_value) def fitness_predict_redeem(self,length_best, tempfinal, user_profile_onehot, user_balance): multiple_time = np.hstack((tempfinal[0, 1], np.tile( tempfinal[0, 2], 7), np.tile(tempfinal[0, 3], 12))) # 拼接倍数 for k in range(4, tempfinal.shape[1]): multiple_time = np.hstack((multiple_time, tempfinal[0, k])) user_profile_onehot = user_profile_onehot * multiple_time # 将部分向量的权重扩大 model_kmean = k_mean(user_profile_onehot) # 聚类 divide_class = model_kmean.k_mean_divide(int(tempfinal[0, 0])) redeem_predict_class = [] for i in range(len(divide_class)): # 将这几种分类分别带入网络识别 user_balance.CalculateDayPurchaseList( divide_class['cluster'+str(i)]) user_balance.CalculateDayRedeemList(divide_class['cluster'+str(i)]) purchase_train, redeem_train = user_balance.GetdataAll() # LSTM_double redeem_model = LSTM_double(redeem_train.reshape((-1, 1))) redeem_model.lr = 0.0001 redeem_model.train_lstm(num_epochs=10,numb_sub = length_best,numb_class=i+1,continue_train=True,class_people='redeem') redeem_predict = redeem_model.prediction(numb_sub = length_best,numb_class=i+1,class_people='redeem') tf.reset_default_graph() redeem_predict_class.append(redeem_predict) redeem_predict_return = np.array(redeem_predict_class).sum(axis=0) return redeem_predict_return def calfitValue(self, value): # 保证损失大于等于0 好像没什么必要的样子 for i in range(value.shape[0]): if value[i] < 0: value[i] = 0 return value def selection(self, pop, value): # 选择 newfitvalue = np.zeros((value.shape[0], 1)) totalValue = sum(value) accumalator = 0 j = 0 for i in value: # 轮盘赌 newValue = (i*1.0/totalValue) accumalator += newValue newfitvalue[j] = (accumalator) j = j+1 newfitvalue[j-1] = 1 ms = [] for i in range(value.shape[0]): ms.append(random.random()) ms.sort() fitin = 0 newin = 0 newpop = pop while newin < 
value.shape[0]: if(ms[newin] < newfitvalue[fitin]): newpop[newin] = pop[fitin] newin = newin+1 else: fitin = fitin+1 return newpop def crossover(self, pop, crossrate, chromLength): # 交叉 row = pop.shape[0]-1 # 确保有两个基因能够对位交叉 pop = pop.tolist() for i in range(0, row, 2): if(random.random() < crossrate): # 对基因块的不同部分进行交叉部位生成 singpoint = random.randint(chromLength) temp1 = [] temp2 = [] temp1.extend(pop[i][0:singpoint]) temp1.extend(pop[i + 1][singpoint:chromLength]) temp2.extend(pop[i + 1][0:singpoint]) temp2.extend(pop[i][singpoint:chromLength]) pop[i] = temp1 # 生成新子群 pop[i + 1] = temp2 pop = np.array(pop) return pop def mutation(self, pop, mutationrate, chromLength): # 变异 row = pop.shape[0] for i in range(row): if (random.random() < mutationrate): mpoint = random.randint(0, chromLength) # 变异部位 if(pop[i, mpoint] == 1): pop[i, mpoint] = 0 else: pop[i, mpoint] = 1 return pop def best(self, pop, value, chromLength): bestvalue = value.max() find_best = np.argmax(value) temp = pop[find_best, :].reshape((-1, chromLength)) return temp, bestvalue, find_best+1
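# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original file: driving the `genetic`
# helper above as a plain GA loop. The chromosome layout (one 3-bit gene for
# the k-means cluster count plus 8-bit weight genes) is inferred from
# binary2decimal(); the toy fitness below is a stand-in for the expensive
# fitness_purchase()/fitness_redeem() calls, which train LSTM models.
import numpy as np

def run_ga_sketch(generations=5, pop_size=8):
    ga = genetic()
    chromLength_type = [0, 3] + [8] * 20         # assumed per-gene bit widths
    chromLength = sum(chromLength_type)          # total bits per individual
    pop = ga.getEncoding(pop_size, chromLength)  # random 0/1 population
    for gen in range(generations):
        decoded = ga.binary2decimal(pop, chromLength_type, chromLength)
        # toy fitness: favour small cluster counts (placeholder for
        # ga.fitness_purchase(decoded, user_profile_onehot, gen))
        value = ga.calfitValue(1.0 / decoded[:, 0])
        best_genes, best_value, best_index = ga.best(pop, value, chromLength)
        print(gen, best_value, int(decoded[best_index - 1, 0]))
        pop = ga.selection(pop, value)
        pop = ga.crossover(pop, crossrate=0.7, chromLength=chromLength)
        pop = ga.mutation(pop, mutationrate=0.05, chromLength=chromLength)

# run_ga_sketch()
# ---------------------------------------------------------------------------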
py
1a44726739d497711fd061ee1e107a74c44062e5
"""python_template""" from python_template.main import hello_world
py
1a44732f608bf21c6b1caca7f94f93dd0c1f1777
# Purpose: using radius DIMENSION # Created: 10.11.2018 # Copyright (c) 2019-2020, Manfred Moitzi # License: MIT License import pathlib import math import ezdxf from ezdxf.math import Vec3, UCS import logging # ======================================== # Setup logging # ======================================== logging.basicConfig(level='WARNING') # ======================================== # Setup your preferred output directory # ======================================== OUTDIR = pathlib.Path('~/Desktop/Outbox').expanduser() if not OUTDIR.exists(): OUTDIR = pathlib.Path() # ======================================== # Default text attributes # ======================================== TEXT_ATTRIBS = { 'height': .25, 'style': ezdxf.options.default_dimension_text_style, } DIM_TEXT_STYLE = ezdxf.options.default_dimension_text_style # ======================================================= # Discarding dimension rendering is possible # for BricsCAD, but is incompatible to AutoCAD -> error # ======================================================= BRICSCAD = False def multiple_locations(delta=10, center=(0, 0)): cx, cy = center return [ (cx + delta, cy), (cx + delta, cy + delta), (cx, cy + delta), (cx - delta, cy + delta), (cx - delta, cy), (cx - delta, cy - delta), (cx, cy - delta), (cx + delta, cy - delta), ] def diameter_default_outside(dxfversion='R2000', delta=10): doc = ezdxf.new(dxfversion, setup=True) msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg msp.add_circle((x, y), radius=3) # Default DimStyle EZ_RADIUS: 1 drawing unit == 1m; scale 1: 100; length_factor=100 -> measurement in cm # closed filled arrow, size 0.25 # DIMSTYLE settings: # dimtmove = 1: use leader, is the best setting for text outside to preserve appearance of DIMENSION entity, # if editing afterwards in BricsCAD (AutoCAD) # center: specifies the center of the circle # radius: specifies the radius of the circle # angle: specifies the the orientation (angle) of the dimension line dim = msp.add_diameter_dim(center=(x, y), radius=3, angle=angle, dimstyle='EZ_RADIUS') # Necessary second step, to create the BLOCK entity with the DIMENSION geometry. # ezdxf supports DXF R2000 attributes for DXF R12 rendering, but they have to be applied by the DIMSTYLE override # feature, this additional attributes are not stored in the XDATA section of the DIMENSION entity, they are just # used to render the DIMENSION entity. # The return value `dim` is not a DIMENSION entity, instead a DimStyleOverride object is returned, the DIMENSION # entity is stored as dim.dimension, see also ezdxf.override.DimStyleOverride class. 
dim.render(discard=BRICSCAD) doc.set_modelspace_vport(height=3 * delta) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_default_outside.dxf') def diameter_default_inside(dxfversion='R2000', delta=10, dimtmove=0): def add_dim(x, y, dimtad): msp.add_circle((x, y), radius=3) dim = msp.add_diameter_dim(center=(x, y), radius=3, angle=angle, dimstyle='EZ_RADIUS_INSIDE', override={ 'dimtad': dimtad, }) dim.render(discard=BRICSCAD) doc = ezdxf.new(dxfversion, setup=True) style = doc.dimstyles.get('EZ_RADIUS_INSIDE') style.dxf.dimtmove = dimtmove # Default DimStyle EZ_RADIUS_INSIDE: 1 drawing unit == 1m; scale 1: 100; length_factor=100 -> measurement in cm # closed filled arrow, size 0.25 # DIMSTYLE settings: # dimtmove = 0: keep dim line with text, is the best setting for text inside to preserve appearance of # DIMENSION entity, if editing afterwards in BricsCAD (AutoCAD) # dimtix = 1: force text inside # dimatfit = 0: force text inside, required by BricsCAD (AutoCAD) # dimtad = 0: center text vertical, BricsCAD (AutoCAD) always creates vertical centered text, # ezdxf let you choose the vertical placement (above, below, center), # but editing the DIMENSION in BricsCAD will reset text to center placement. msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg add_dim(x, y, dimtad=1) # above add_dim(x + 3 * delta, y, dimtad=0) # center add_dim(x + 6 * delta, y, dimtad=4) # below doc.set_modelspace_vport(height=3 * delta) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_default_inside_dimtmove_{dimtmove}.dxf') def diameter_default_outside_horizontal(dxfversion='R2000', delta=10): def add_dim(x, y, dimtad): msp.add_circle((x, y), radius=3) dim = msp.add_diameter_dim(center=(x, y), radius=3, angle=angle, dimstyle='EZ_RADIUS', override={ 'dimtoh': 1, # force text outside horizontal 'dimtad': dimtad, }) dim.render(discard=BRICSCAD) doc = ezdxf.new(dxfversion, setup=True) msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg add_dim(x, y, dimtad=1) # above add_dim(x + 3 * delta, y, dimtad=0) # center add_dim(x + 6 * delta, y, dimtad=4) # below doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0)) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_default_outside_horizontal.dxf') def diameter_default_inside_horizontal(dxfversion='R2000', delta=10, dimtmove=0): doc = ezdxf.new(dxfversion, setup=True) style = doc.dimstyles.get('EZ_RADIUS_INSIDE') style.dxf.dimtmove = dimtmove msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg msp.add_circle((x, y), radius=3) dim = msp.add_diameter_dim(center=(x, y), radius=3, angle=angle, dimstyle='EZ_RADIUS_INSIDE', override={ 'dimtih': 1, # force text inside horizontal }) dim.render(discard=BRICSCAD) doc.set_modelspace_vport(height=3 * delta) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_default_inside_horizontal_dimtmove_{dimtmove}.dxf') def diameter_user_defined_outside(dxfversion='R2000', delta=15): def add_dim(x, y, radius, dimtad): center = Vec3(x, y) msp.add_circle((x, y), radius=3) dim_location = center + Vec3.from_deg_angle(angle, radius) dim = msp.add_diameter_dim(center=(x, y), radius=3, location=dim_location, dimstyle='EZ_RADIUS', override={ 'dimtad': dimtad, }) dim.render(discard=BRICSCAD) doc = ezdxf.new(dxfversion, setup=True) msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg add_dim(x, y, 5, dimtad=1) # above add_dim(x + 3 * delta, y, 5, dimtad=0) # center add_dim(x + 6 * 
delta, y, 5, dimtad=4) # below doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0)) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_user_defined_outside.dxf') def diameter_user_defined_outside_horizontal(dxfversion='R2000', delta=15): def add_dim(x, y, radius, dimtad): center = Vec3(x, y) msp.add_circle((x, y), radius=3) dim_location = center + Vec3.from_deg_angle(angle, radius) dim = msp.add_diameter_dim(center=(x, y), radius=3, location=dim_location, dimstyle='EZ_RADIUS', override={ 'dimtad': dimtad, 'dimtoh': 1, # force text outside horizontal }) dim.render(discard=BRICSCAD) doc = ezdxf.new(dxfversion, setup=True) msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg add_dim(x, y, 5, dimtad=1) # above add_dim(x + 3 * delta, y, 5, dimtad=0) # center add_dim(x + 6 * delta, y, 5, dimtad=4) # below doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0)) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_user_defined_outside_horizontal.dxf') def diameter_user_defined_inside(dxfversion='R2000', delta=10, dimtmove=0): def add_dim(x, y, radius, dimtad): center = Vec3(x, y) msp.add_circle((x, y), radius=3) dim_location = center + Vec3.from_deg_angle(angle, radius) dim = msp.add_diameter_dim(center=(x, y), radius=3, location=dim_location, dimstyle='EZ_RADIUS', override={ 'dimtad': dimtad, }) dim.render(discard=BRICSCAD) doc = ezdxf.new(dxfversion, setup=True) style = doc.dimstyles.get('EZ_RADIUS') style.dxf.dimtmove = dimtmove msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg add_dim(x, y, 1, dimtad=1) # above add_dim(x + 3 * delta, y, 1, dimtad=0) # center add_dim(x + 6 * delta, y, 1, dimtad=4) # below doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0)) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_user_defined_inside_dimtmove_{dimtmove}.dxf') def diameter_user_defined_inside_horizontal(dxfversion='R2000', delta=10): def add_dim(x, y, radius, dimtad): center = Vec3(x, y) msp.add_circle((x, y), radius=3) dim_location = center + Vec3.from_deg_angle(angle, radius) dim = msp.add_diameter_dim(center=(x, y), radius=3, location=dim_location, dimstyle='EZ_RADIUS', override={ 'dimtad': dimtad, 'dimtih': 1, # force text inside horizontal }) dim.render(discard=BRICSCAD) doc = ezdxf.new(dxfversion, setup=True) msp = doc.modelspace() for x, y in multiple_locations(delta=delta): angle = Vec3(x, y).angle_deg add_dim(x, y, 1, dimtad=1) # above add_dim(x + 3 * delta, y, 1, dimtad=0) # center add_dim(x + 6 * delta, y, 1, dimtad=4) # below doc.set_modelspace_vport(height=3 * delta, center=(4.5 * delta, 0)) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_user_defined_inside_horizontal.dxf') def diameter_3d(dxfversion='R2000', delta=10): doc = ezdxf.new(dxfversion, setup=True) msp = doc.modelspace() for x, y in multiple_locations(delta=delta): ucs = UCS(origin=(x, y, 0)).rotate_local_x(math.radians(45)) angle = Vec3(x, y).angle_deg msp.add_circle((0, 0), radius=3).transform(ucs.matrix) dim = msp.add_diameter_dim(center=(0, 0), radius=3, angle=angle, dimstyle='EZ_RADIUS') dim.render(discard=BRICSCAD, ucs=ucs) doc.set_modelspace_vport(height=3 * delta) doc.saveas(OUTDIR / f'dim_diameter_{dxfversion}_3d.dxf') if __name__ == '__main__': diameter_default_outside() diameter_default_inside(dimtmove=0) # dimline from center diameter_default_inside(dimtmove=1) # dimline from text diameter_default_outside_horizontal() diameter_default_inside_horizontal(dimtmove=0) # dimline from center 
diameter_default_inside_horizontal(dimtmove=1) # dimline from text diameter_user_defined_outside() diameter_user_defined_outside_horizontal() diameter_user_defined_inside(dimtmove=0) # dimline from text, also for 1 diameter_user_defined_inside(dimtmove=2) # dimline from center diameter_user_defined_inside_horizontal() diameter_3d()
py
1a44736205f349383eb2b5f6af0bb8442bc6b997
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType import copy as _copy class Border(_BaseTraceHierarchyType): # class properties # -------------------- _parent_path_str = "pointcloud.marker" _path_str = "pointcloud.marker.border" _valid_props = {"arearatio", "color"} # arearatio # --------- @property def arearatio(self): """ Specifies what fraction of the marker area is covered with the border. The 'arearatio' property is a number and may be specified as: - An int or float in the interval [0, 1] Returns ------- int|float """ return self["arearatio"] @arearatio.setter def arearatio(self, val): self["arearatio"] = val # color # ----- @property def color(self): """ Sets the stroke color. It accepts a specific color. If the color is not fully opaque and there are hundreds of thousands of points, it may cause slower zooming and panning. The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val # Self properties description # --------------------------- @property def _prop_descriptions(self): return """\ arearatio Specifies what fraction of the marker area is covered with the border. color Sets the stroke color. It accepts a specific color. If the color is not fully opaque and there are hundreds of thousands of points, it may cause slower zooming and panning. """ def __init__(self, arg=None, arearatio=None, color=None, **kwargs): """ Construct a new Border object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.pointcloud.marker.Border` arearatio Specifies what fraction of the marker area is covered with the border. 
color Sets the stroke color. It accepts a specific color. If the color is not fully opaque and there are hundreds of thousands of points, it may cause slower zooming and panning. Returns ------- Border """ super(Border, self).__init__("border") if "_parent" in kwargs: self._parent = kwargs["_parent"] return # Validate arg # ------------ if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError( """\ The first argument to the plotly.graph_objs.pointcloud.marker.Border constructor must be a dict or an instance of :class:`plotly.graph_objs.pointcloud.marker.Border`""" ) # Handle skip_invalid # ------------------- self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) # Populate data dict with properties # ---------------------------------- _v = arg.pop("arearatio", None) _v = arearatio if arearatio is not None else _v if _v is not None: self["arearatio"] = _v _v = arg.pop("color", None) _v = color if color is not None else _v if _v is not None: self["color"] = _v # Process unknown kwargs # ---------------------- self._process_kwargs(**dict(arg, **kwargs)) # Reset skip_invalid # ------------------ self._skip_invalid = False
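# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the generated module above. It only
# exercises the two properties this class defines; note that the parent
# `pointcloud` trace type is deprecated in recent plotly releases.
import plotly.graph_objs as go

border = go.pointcloud.marker.Border(color="rgb(40,40,40)", arearatio=0.25)
print(border.to_plotly_json())  # {'arearatio': 0.25, 'color': 'rgb(40,40,40)'}
# ---------------------------------------------------------------------------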
py
1a447468bf389347adac854bf1936d8d244444d4
from array import array from functools import partial import traceback import importlib from enum import Enum import dask from dask.base import normalize_token import msgpack from . import pickle from ..utils import has_keyword, typename, ensure_bytes from .compression import maybe_compress, decompress from .utils import ( unpack_frames, pack_frames_prelude, frame_split_size, msgpack_opts, ) lazy_registrations = {} dask_serialize = dask.utils.Dispatch("dask_serialize") dask_deserialize = dask.utils.Dispatch("dask_deserialize") _cached_allowed_modules = {} def dask_dumps(x, context=None): """Serialize object using the class-based registry""" type_name = typename(type(x)) try: dumps = dask_serialize.dispatch(type(x)) except TypeError: raise NotImplementedError(type_name) if has_keyword(dumps, "context"): header, frames = dumps(x, context=context) else: header, frames = dumps(x) header["type"] = type_name header["type-serialized"] = pickle.dumps(type(x), protocol=4) header["serializer"] = "dask" return header, frames def dask_loads(header, frames): typ = pickle.loads(header["type-serialized"]) loads = dask_deserialize.dispatch(typ) return loads(header, frames) def pickle_dumps(x, context=None): frames = [None] buffer_callback = lambda f: frames.append(memoryview(f)) frames[0] = pickle.dumps( x, buffer_callback=buffer_callback, protocol=context.get("pickle-protocol", None) if context else None, ) header = { "serializer": "pickle", "writeable": tuple(not f.readonly for f in frames[1:]), } return header, frames def pickle_loads(header, frames): x, buffers = frames[0], frames[1:] writeable = header["writeable"] for i in range(len(buffers)): mv = memoryview(buffers[i]) if writeable[i] == mv.readonly: if mv.readonly: buffers[i] = memoryview(bytearray(mv)).cast(mv.format, mv.shape) else: buffers[i] = memoryview(bytes(mv)).cast(mv.format, mv.shape) return pickle.loads(x, buffers=buffers) def import_allowed_module(name): if name in _cached_allowed_modules: return _cached_allowed_modules[name] # Check for non-ASCII characters name = name.encode("ascii").decode() # We only compare the root module root = name.split(".", 1)[0] # Note, if an empty string creeps into allowed-imports it is disallowed explicitly if root and root in dask.config.get("distributed.scheduler.allowed-imports"): _cached_allowed_modules[name] = importlib.import_module(name) return _cached_allowed_modules[name] else: raise RuntimeError( f"Importing {repr(name)} is not allowed, please add it to the list of " "allowed modules the scheduler can import via the " "distributed.scheduler.allowed-imports configuration setting." ) def msgpack_decode_default(obj): """ Custom packer/unpacker for msgpack """ if "__Enum__" in obj: mod = import_allowed_module(obj["__module__"]) typ = getattr(mod, obj["__name__"]) return getattr(typ, obj["name"]) if "__Set__" in obj: return set(obj["as-list"]) if "__Serialized__" in obj: # Notice, the data here is marked a Serialized rather than deserialized. This # is because deserialization requires Pickle which the Scheduler cannot run # because of security reasons. # By marking it Serialized, the data is passed through to the workers that # eventually will deserialize it. 
return Serialized(*obj["data"]) return obj def msgpack_encode_default(obj): """ Custom packer/unpacker for msgpack """ if isinstance(obj, Serialize): return {"__Serialized__": True, "data": serialize(obj.data)} if isinstance(obj, Enum): return { "__Enum__": True, "name": obj.name, "__module__": obj.__module__, "__name__": type(obj).__name__, } if isinstance(obj, set): return {"__Set__": True, "as-list": list(obj)} return obj def msgpack_dumps(x): try: frame = msgpack.dumps(x, use_bin_type=True) except Exception: raise NotImplementedError() else: return {"serializer": "msgpack"}, [frame] def msgpack_loads(header, frames): return msgpack.loads(b"".join(frames), use_list=False, **msgpack_opts) def serialization_error_loads(header, frames): msg = "\n".join([ensure_bytes(frame).decode("utf8") for frame in frames]) raise TypeError(msg) families = {} def register_serialization_family(name, dumps, loads): families[name] = (dumps, loads, dumps and has_keyword(dumps, "context")) register_serialization_family("dask", dask_dumps, dask_loads) register_serialization_family("pickle", pickle_dumps, pickle_loads) register_serialization_family("msgpack", msgpack_dumps, msgpack_loads) register_serialization_family("error", None, serialization_error_loads) def check_dask_serializable(x): if type(x) in (list, set, tuple) and len(x): return check_dask_serializable(next(iter(x))) elif type(x) is dict and len(x): return check_dask_serializable(next(iter(x.items()))[1]) else: try: dask_serialize.dispatch(type(x)) return True except TypeError: pass return False def serialize(x, serializers=None, on_error="message", context=None): r""" Convert object to a header and list of bytestrings This takes in an arbitrary Python object and returns a msgpack serializable header and a list of bytes or memoryview objects. The serialization protocols to use are configurable: a list of names define the set of serializers to use, in order. These names are keys in the ``serializer_registry`` dict (e.g., 'pickle', 'msgpack'), which maps to the de/serialize functions. The name 'dask' is special, and will use the per-class serialization methods. ``None`` gives the default list ``['dask', 'pickle']``. Examples -------- >>> serialize(1) ({}, [b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.']) >>> serialize(b'123') # some special types get custom treatment ({'type': 'builtins.bytes'}, [b'123']) >>> deserialize(*serialize(1)) 1 Returns ------- header: dictionary containing any msgpack-serializable metadata frames: list of bytes or memoryviews, commonly of length one See Also -------- deserialize : Convert header and frames back to object to_serialize : Mark that data in a message should be serialized register_serialization : Register custom serialization functions """ if serializers is None: serializers = ("dask", "pickle") # TODO: get from configuration if isinstance(x, Serialized): return x.header, x.frames if type(x) in (list, set, tuple, dict): iterate_collection = False if type(x) is list and "msgpack" in serializers: # Note: "msgpack" will always convert lists to tuples # (see GitHub #3716), so we should iterate # through the list if "msgpack" comes before "pickle" # in the list of serializers. 
iterate_collection = ("pickle" not in serializers) or ( serializers.index("pickle") > serializers.index("msgpack") ) if not iterate_collection: # Check for "dask"-serializable data in dict/list/set iterate_collection = check_dask_serializable(x) # Determine whether keys are safe to be serialized with msgpack if type(x) is dict and iterate_collection: try: msgpack.dumps(list(x.keys())) except Exception: dict_safe = False else: dict_safe = True if ( type(x) in (list, set, tuple) and iterate_collection or type(x) is dict and iterate_collection and dict_safe ): if isinstance(x, dict): headers_frames = [] for k, v in x.items(): _header, _frames = serialize( v, serializers=serializers, on_error=on_error, context=context ) _header["key"] = k headers_frames.append((_header, _frames)) else: headers_frames = [ serialize( obj, serializers=serializers, on_error=on_error, context=context ) for obj in x ] frames = [] lengths = [] compressions = [] for _header, _frames in headers_frames: frames.extend(_frames) length = len(_frames) lengths.append(length) compressions.extend(_header.get("compression") or [None] * len(_frames)) headers = [obj[0] for obj in headers_frames] headers = { "sub-headers": headers, "is-collection": True, "frame-lengths": lengths, "type-serialized": type(x).__name__, } if any(compression is not None for compression in compressions): headers["compression"] = compressions return headers, frames tb = "" for name in serializers: dumps, loads, wants_context = families[name] try: header, frames = dumps(x, context=context) if wants_context else dumps(x) header["serializer"] = name return header, frames except NotImplementedError: continue except Exception as e: tb = traceback.format_exc() break msg = "Could not serialize object of type %s." % type(x).__name__ if on_error == "message": frames = [msg] if tb: frames.append(tb[:100000]) frames = [frame.encode() for frame in frames] return {"serializer": "error"}, frames elif on_error == "raise": raise TypeError(msg, str(x)[:10000]) def deserialize(header, frames, deserializers=None): """ Convert serialized header and list of bytestrings back to a Python object Parameters ---------- header : dict frames : list of bytes deserializers : Optional[Dict[str, Tuple[Callable, Callable, bool]]] An optional dict mapping a name to a (de)serializer. See `dask_serialize` and `dask_deserialize` for more. 
See Also -------- serialize """ if "is-collection" in header: headers = header["sub-headers"] lengths = header["frame-lengths"] cls = {"tuple": tuple, "list": list, "set": set, "dict": dict}[ header["type-serialized"] ] start = 0 if cls is dict: d = {} for _header, _length in zip(headers, lengths): k = _header.pop("key") d[k] = deserialize( _header, frames[start : start + _length], deserializers=deserializers, ) start += _length return d else: lst = [] for _header, _length in zip(headers, lengths): lst.append( deserialize( _header, frames[start : start + _length], deserializers=deserializers, ) ) start += _length return cls(lst) name = header.get("serializer") if deserializers is not None and name not in deserializers: raise TypeError( "Data serialized with %s but only able to deserialize " "data with %s" % (name, str(list(deserializers))) ) dumps, loads, wants_context = families[name] return loads(header, frames) def serialize_and_split(x, serializers=None, on_error="message", context=None): """Serialize and split compressable frames This function is a drop-in replacement of `serialize()` that calls `serialize()` followed by `frame_split_size()` on frames that should be compressed. Use `merge_and_deserialize()` to merge and deserialize the frames back. See Also -------- serialize merge_and_deserialize """ header, frames = serialize(x, serializers, on_error, context) num_sub_frames = [] offsets = [] out_frames = [] out_compression = [] for frame, compression in zip( frames, header.get("compression") or [None] * len(frames) ): if compression is None: # default behavior sub_frames = frame_split_size(frame) num_sub_frames.append(len(sub_frames)) offsets.append(len(out_frames)) out_frames.extend(sub_frames) out_compression.extend([None] * len(sub_frames)) else: num_sub_frames.append(1) offsets.append(len(out_frames)) out_frames.append(frame) out_compression.append(compression) assert len(out_compression) == len(out_frames) # Notice, in order to match msgpack's implicit convertion to tuples, # we convert to tuples here as well. 
header["split-num-sub-frames"] = tuple(num_sub_frames) header["split-offsets"] = tuple(offsets) header["compression"] = tuple(out_compression) return header, out_frames def merge_and_deserialize(header, frames, deserializers=None): """Merge and deserialize frames This function is a drop-in replacement of `deserialize()` that merges frames that were split by `serialize_and_split()` See Also -------- deserialize serialize_and_split """ merged_frames = [] if "split-num-sub-frames" not in header: merged_frames = frames else: for n, offset in zip(header["split-num-sub-frames"], header["split-offsets"]): if n == 1: merged_frames.append(frames[offset]) else: merged_frames.append(bytearray().join(frames[offset : offset + n])) return deserialize(header, merged_frames, deserializers=deserializers) class Serialize: """Mark an object that should be serialized Examples -------- >>> msg = {'op': 'update', 'data': to_serialize(123)} >>> msg # doctest: +SKIP {'op': 'update', 'data': <Serialize: 123>} See also -------- distributed.protocol.dumps """ def __init__(self, data): self.data = data def __repr__(self): return "<Serialize: %s>" % str(self.data) def __eq__(self, other): return isinstance(other, Serialize) and other.data == self.data def __ne__(self, other): return not (self == other) def __hash__(self): return hash(self.data) to_serialize = Serialize class Serialized: """ An object that is already serialized into header and frames Normal serialization operations pass these objects through. This is typically used within the scheduler which accepts messages that contain data without actually unpacking that data. """ def __init__(self, header, frames): self.header = header self.frames = frames def __eq__(self, other): return ( isinstance(other, Serialized) and other.header == self.header and other.frames == self.frames ) def __ne__(self, other): return not (self == other) def extract_serialize(x) -> tuple: """Pull out Serialize objects from message This also remove large bytestrings from the message into a second dictionary. Examples -------- >>> from distributed.protocol import to_serialize >>> msg = {'op': 'update', 'data': to_serialize(123)} >>> extract_serialize(msg) ({'op': 'update'}, {('data',): <Serialize: 123>}, set()) """ typ_x: type = type(x) if typ_x is dict: x_d: dict = x x_items = x_d.items() x2 = {} elif typ_x is list: x_l: list = x x_items = enumerate(x_l) x2 = len(x_l) * [None] ser = {} bytestrings = set() path = () _extract_serialize(x_items, x2, ser, bytestrings, path) return x2, ser, bytestrings def _extract_serialize(x_items, x2, ser: dict, bytestrings: set, path: tuple) -> None: for k, v in x_items: path_k = path + (k,) typ_v: type = type(v) if typ_v is dict: v_d: dict = v v_items = v_d.items() x2[k] = v2 = {} _extract_serialize(v_items, v2, ser, bytestrings, path_k) elif typ_v is list: v_l: list = v v_items = enumerate(v_l) x2[k] = v2 = len(v_l) * [None] _extract_serialize(v_items, v2, ser, bytestrings, path_k) elif typ_v is Serialize or typ_v is Serialized: ser[path_k] = v elif typ_v is bytes: v_b: bytes = v if len(v_b) > 2 ** 16: ser[path_k] = to_serialize(v_b) bytestrings.add(path_k) else: x2[k] = v_b elif typ_v is bytearray: v_ba: bytearray = v if len(v_ba) > 2 ** 16: ser[path_k] = to_serialize(v_ba) bytestrings.add(path_k) else: x2[k] = v_ba else: x2[k] = v def nested_deserialize(x): """ Replace all Serialize and Serialized values nested in *x* with the original values. Returns a copy of *x*. 
>>> msg = {'op': 'update', 'data': to_serialize(123)} >>> nested_deserialize(msg) {'op': 'update', 'data': 123} """ def replace_inner(x): if type(x) is dict: x = x.copy() for k, v in x.items(): typ = type(v) if typ is dict or typ is list: x[k] = replace_inner(v) elif typ is Serialize: x[k] = v.data elif typ is Serialized: x[k] = deserialize(v.header, v.frames) elif type(x) is list: x = list(x) for k, v in enumerate(x): typ = type(v) if typ is dict or typ is list: x[k] = replace_inner(v) elif typ is Serialize: x[k] = v.data elif typ is Serialized: x[k] = deserialize(v.header, v.frames) return x return replace_inner(x) def serialize_bytelist(x, **kwargs): header, frames = serialize_and_split(x, **kwargs) if frames: compression, frames = zip(*map(maybe_compress, frames)) else: compression = [] header["compression"] = compression header["count"] = len(frames) header = msgpack.dumps(header, use_bin_type=True) frames2 = [header, *frames] frames2.insert(0, pack_frames_prelude(frames2)) return frames2 def serialize_bytes(x, **kwargs): L = serialize_bytelist(x, **kwargs) return b"".join(L) def deserialize_bytes(b): frames = unpack_frames(b) header, frames = frames[0], frames[1:] if header: header = msgpack.loads(header, raw=False, use_list=False) else: header = {} frames = decompress(header, frames) return merge_and_deserialize(header, frames) ################################ # Class specific serialization # ################################ def register_serialization(cls, serialize, deserialize): """Register a new class for dask-custom serialization Parameters ---------- cls : type serialize : callable(cls) -> Tuple[Dict, List[bytes]] deserialize : callable(header: Dict, frames: List[bytes]) -> cls Examples -------- >>> class Human: ... def __init__(self, name): ... self.name = name >>> def serialize(human): ... header = {} ... frames = [human.name.encode()] ... return header, frames >>> def deserialize(header, frames): ... return Human(frames[0].decode()) >>> register_serialization(Human, serialize, deserialize) >>> serialize(Human('Alice')) ({}, [b'Alice']) See Also -------- serialize deserialize """ if isinstance(cls, str): raise TypeError( "Strings are no longer accepted for type registration. " "Use dask_serialize.register_lazy instead" ) dask_serialize.register(cls)(serialize) dask_deserialize.register(cls)(deserialize) def register_serialization_lazy(toplevel, func): """Register a registration function to be called if *toplevel* module is ever loaded. """ raise Exception("Serialization registration has changed. 
See documentation") @partial(normalize_token.register, Serialized) def normalize_Serialized(o): return [o.header] + o.frames # for dask.base.tokenize # Teach serialize how to handle bytes @dask_serialize.register(bytes) def _serialize_bytes(obj): header = {} # no special metadata frames = [obj] return header, frames # Teach serialize how to handle bytestrings @dask_serialize.register(bytearray) def _serialize_bytearray(obj): header = {} # no special metadata frames = [obj] return header, frames @dask_deserialize.register(bytes) def _deserialize_bytes(header, frames): if len(frames) == 1 and isinstance(frames[0], bytes): return frames[0] else: return bytes().join(frames) @dask_deserialize.register(bytearray) def _deserialize_bytearray(header, frames): if len(frames) == 1 and isinstance(frames[0], bytearray): return frames[0] else: return bytearray().join(frames) @dask_serialize.register(array) def _serialize_array(obj): header = {"typecode": obj.typecode, "writeable": (None,)} frames = [memoryview(obj)] return header, frames @dask_deserialize.register(array) def _deserialize_array(header, frames): a = array(header["typecode"]) for f in map(memoryview, frames): try: f = f.cast("B") except TypeError: f = f.tobytes() a.frombytes(f) return a @dask_serialize.register(memoryview) def _serialize_memoryview(obj): if obj.format == "O": raise ValueError("Cannot serialize `memoryview` containing Python objects") header = {"format": obj.format, "shape": obj.shape} frames = [obj] return header, frames @dask_deserialize.register(memoryview) def _deserialize_memoryview(header, frames): if len(frames) == 1: out = memoryview(frames[0]).cast("B") else: out = memoryview(b"".join(frames)) out = out.cast(header["format"], header["shape"]) return out ######################### # Descend into __dict__ # ######################### def _is_msgpack_serializable(v): typ = type(v) return ( v is None or typ is str or typ is bool or typ is int or typ is float or isinstance(v, dict) and all(map(_is_msgpack_serializable, v.values())) and all(typ is str for x in v.keys()) or isinstance(v, (list, tuple)) and all(map(_is_msgpack_serializable, v)) ) class ObjectDictSerializer: def __init__(self, serializer): self.serializer = serializer def serialize(self, est): header = { "serializer": self.serializer, "type-serialized": pickle.dumps(type(est), protocol=4), "simple": {}, "complex": {}, } frames = [] if isinstance(est, dict): d = est else: d = est.__dict__ for k, v in d.items(): if _is_msgpack_serializable(v): header["simple"][k] = v else: if isinstance(v, dict): h, f = self.serialize(v) else: h, f = serialize(v, serializers=(self.serializer, "pickle")) header["complex"][k] = { "header": h, "start": len(frames), "stop": len(frames) + len(f), } frames += f return header, frames def deserialize(self, header, frames): cls = pickle.loads(header["type-serialized"]) if issubclass(cls, dict): dd = obj = {} else: obj = object.__new__(cls) dd = obj.__dict__ dd.update(header["simple"]) for k, d in header["complex"].items(): h = d["header"] f = frames[d["start"] : d["stop"]] v = deserialize(h, f) dd[k] = v return obj dask_object_with_dict_serializer = ObjectDictSerializer("dask") dask_deserialize.register(dict)(dask_object_with_dict_serializer.deserialize) def register_generic( cls, serializer_name="dask", serialize_func=dask_serialize, deserialize_func=dask_deserialize, ): """Register (de)serialize to traverse through __dict__ Normally when registering new classes for Dask's custom serialization you need to manage headers and frames, 
which can be tedious. If all you want
    to do is traverse through your object and apply serialize to all of your
    object's attributes then this function may provide an easier path.

    This registers a class for the custom Dask serialization family. It
    serializes it by traversing through its __dict__ of attributes and
    applying ``serialize`` and ``deserialize`` recursively. It collects a set
    of frames and keeps small attributes in the header. Deserialization
    reverses this process.

    This is a good idea if the following hold:

    1. Most of the bytes of your object are composed of data types that
       Dask's custom serialization already handles well, like Numpy arrays.
    2. Your object doesn't require any special constructor logic, other than
       object.__new__(cls)

    Examples
    --------
    >>> import sklearn.base
    >>> from distributed.protocol import register_generic
    >>> register_generic(sklearn.base.BaseEstimator)

    See Also
    --------
    dask_serialize
    dask_deserialize
    """
    object_with_dict_serializer = ObjectDictSerializer(serializer_name)
    serialize_func.register(cls)(object_with_dict_serializer.serialize)
    deserialize_func.register(cls)(object_with_dict_serializer.deserialize)
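
# --------------------------------------------------------------------------- #
# Hedged usage sketch (illustrative only, not part of the upstream module):    #
# round-trip a plain object through the generic __dict__-based serializer      #
# registered above. ``Point`` is a made-up example class; the __main__ guard   #
# keeps the demo from running on import.                                       #
# --------------------------------------------------------------------------- #
if __name__ == "__main__":

    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    # Traverse __dict__ on (de)serialization, as described in register_generic.
    register_generic(Point)

    header, frames = serialize(Point(1, 2), serializers=("dask",))
    restored = deserialize(header, frames)
    print(restored.x, restored.y)  # -> 1 2

    # The byte-oriented helpers above add framing and optional compression on
    # top of the same machinery.
    blob = serialize_bytes(Point(3, 4))
    print(deserialize_bytes(blob).y)  # -> 4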
py
1a447482b8dcff4dbd24a4b7a534d8910e97a9ea
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'digital_cv_project.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
py
1a447575882474afa8a029798cccb3ac242e4a45
list_ = input()
# list_ = "день победы 1945 года 9 мая"  # sample input: "Victory Day, 9 May 1945"
list_01 = list_.split(' ')
num_ = []
for i in list_01:
    if i.isdigit():  # the check already yields a bool, so an explicit "== True" is unnecessary
        # print(list_01)
        num_.append(int(i))
        # print(num_)
num_.sort()  # sorts in place, so there is no need to build a new list (as noted in the theory part)
print(num_)
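
# Illustrative sample run (assumed input, not part of the original snippet):
#   input : "день победы 1945 года 9 мая"
#   output: [9, 1945]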
py
1a4476f1bc8ffe6dc925d5d359a65dd5e1f32c50
from output.models.ms_data.regex.regex_test_5_xsd.regex_test_5 import Doc __all__ = [ "Doc", ]
py
1a4477ea9a3ddcd79d70f0de3327ab8d45e9ebd6
import inspect import os import shutil import subprocess import stat import sys import tarfile import time import zipfile def install_requirements(what): old_path = sys.path[:] w = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) sys.path.insert(0, os.path.dirname(os.path.dirname(w))) try: from setup import EXTRAS_REQUIRE, read finally: sys.path = old_path requirements = ['mock>=2.0.0', 'flake8', 'pytest', 'pytest-cov'] if what == 'all' else ['behave'] requirements += ['psycopg2-binary', 'coverage'] for r in read('requirements.txt').split('\n'): r = r.strip() if r != '': extras = {e for e, v in EXTRAS_REQUIRE.items() if v and r.startswith(v[0])} if not extras or what == 'all' or what in extras: requirements.append(r) subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'pip']) r = subprocess.call([sys.executable, '-m', 'pip', 'install'] + requirements) s = subprocess.call([sys.executable, '-m', 'pip', 'install', '--upgrade', 'setuptools']) return s | r def install_packages(what): packages = { 'zookeeper': ['zookeeper', 'zookeeper-bin', 'zookeeperd'], 'consul': ['consul'], } packages['exhibitor'] = packages['zookeeper'] packages = packages.get(what, []) ver = str({'etcd': '9.6', 'etcd3': '13', 'consul': 12, 'exhibitor': 11, 'kubernetes': 13, 'raft': 12}.get(what)) subprocess.call(['sudo', 'apt-get', 'update', '-y']) return subprocess.call(['sudo', 'apt-get', 'install', '-y', 'postgresql-' + ver, 'expect-dev', 'wget'] + packages) def get_file(url, name): try: from urllib.request import urlretrieve except ImportError: from urllib import urlretrieve print('Downloading ' + url) urlretrieve(url, name) def untar(archive, name): with tarfile.open(archive) as tar: f = tar.extractfile(name) dest = os.path.basename(name) with open(dest, 'wb') as d: shutil.copyfileobj(f, d) return dest def unzip(archive, name): with zipfile.ZipFile(archive, 'r') as z: name = z.extract(name) dest = os.path.basename(name) shutil.move(name, dest) return dest def unzip_all(archive): print('Extracting ' + archive) with zipfile.ZipFile(archive, 'r') as z: z.extractall() def chmod_755(name): os.chmod(name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) def unpack(archive, name): print('Extracting {0} from {1}'.format(name, archive)) func = unzip if archive.endswith('.zip') else untar name = func(archive, name) chmod_755(name) return name def install_etcd(): version = os.environ.get('ETCDVERSION', '3.3.13') platform = {'linux2': 'linux', 'win32': 'windows', 'cygwin': 'windows'}.get(sys.platform, sys.platform) dirname = 'etcd-v{0}-{1}-amd64'.format(version, platform) ext = 'tar.gz' if platform == 'linux' else 'zip' name = '{0}.{1}'.format(dirname, ext) url = 'https://github.com/etcd-io/etcd/releases/download/v{0}/{1}'.format(version, name) get_file(url, name) ext = '.exe' if platform == 'windows' else '' return int(unpack(name, '{0}/etcd{1}'.format(dirname, ext)) is None) def install_postgres(): version = os.environ.get('PGVERSION', '12.1-1') platform = {'darwin': 'osx', 'win32': 'windows-x64', 'cygwin': 'windows-x64'}[sys.platform] name = 'postgresql-{0}-{1}-binaries.zip'.format(version, platform) get_file('http://get.enterprisedb.com/postgresql/' + name, name) unzip_all(name) bin_dir = os.path.join('pgsql', 'bin') for f in os.listdir(bin_dir): chmod_755(os.path.join(bin_dir, f)) subprocess.call(['pgsql/bin/postgres', '-V']) return 0 def setup_kubernetes(): 
get_file('https://storage.googleapis.com/minikube/k8sReleases/v1.7.0/localkube-linux-amd64', 'localkube') chmod_755('localkube') devnull = open(os.devnull, 'w') subprocess.Popen(['sudo', 'nohup', './localkube', '--logtostderr=true', '--enable-dns=false'], stdout=devnull, stderr=devnull) for _ in range(0, 120): if subprocess.call(['wget', '-qO', '-', 'http://127.0.0.1:8080/'], stdout=devnull, stderr=devnull) == 0: break time.sleep(1) else: print('localkube did not start') return 1 subprocess.call('sudo chmod 644 /var/lib/localkube/certs/*', shell=True) print('Set up .kube/config') kube = os.path.join(os.path.expanduser('~'), '.kube') os.makedirs(kube) with open(os.path.join(kube, 'config'), 'w') as f: f.write("""apiVersion: v1 clusters: - cluster: certificate-authority: /var/lib/localkube/certs/ca.crt server: https://127.0.0.1:8443 name: local contexts: - context: cluster: local user: myself name: local current-context: local kind: Config preferences: {} users: - name: myself user: client-certificate: /var/lib/localkube/certs/apiserver.crt client-key: /var/lib/localkube/certs/apiserver.key """) return 0 def main(): what = os.environ.get('DCS', sys.argv[1] if len(sys.argv) > 1 else 'all') if what != 'all': if sys.platform.startswith('linux'): r = install_packages(what) if r == 0 and what == 'kubernetes': r = setup_kubernetes() else: r = install_postgres() if r == 0 and what.startswith('etcd'): r = install_etcd() if r != 0: return r return install_requirements(what) if __name__ == '__main__': sys.exit(main())
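
# Hedged usage note (not part of the original script; "prepare.py" stands in for
# this file's real name): the entry point is selected through the DCS environment
# variable or the first CLI argument, e.g.
#
#   DCS=kubernetes python prepare.py   # OS packages, localkube and ~/.kube/config, then python deps
#   DCS=etcd3      python prepare.py   # backend-specific packages/binaries, then python deps
#   python prepare.py all              # python/test requirements only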
py
1a4478201768cd6884edb75962764ab5b8518a2d
import tempfile from pathlib import Path import argparse import shutil import os import glob import cv2 import cog from run import run_cmd from datetime import datetime class Predictor(cog.Predictor): def setup(self): parser = argparse.ArgumentParser() parser.add_argument( "--input_folder", type=str, default="input/cog_temp"+ str(datetime.utcnow().timestamp()), help="Test images" ) parser.add_argument( "--output_folder", type=str, default="output"+ str(datetime.utcnow().timestamp()), help="Restored images, please use the absolute path", ) parser.add_argument("--GPU", type=str, default="0", help="0,1,2") parser.add_argument( "--checkpoint_name", type=str, default="Setting_9_epoch_100", help="choose which checkpoint", ) self.opts = parser.parse_args("") self.basepath = os.getcwd() self.opts.input_folder = os.path.join(self.basepath, self.opts.input_folder) self.opts.output_folder = os.path.join(self.basepath, self.opts.output_folder) os.makedirs(self.opts.input_folder, exist_ok=True) os.makedirs(self.opts.output_folder, exist_ok=True) @cog.input("image", type=Path, help="input image") @cog.input( "HR", type=bool, default=False, help="whether the input image is high-resolution", ) @cog.input( "with_scratch", type=bool, default=False, help="whether the input image is scratched", ) def predict(self, image, HR=False, with_scratch=False): try: os.chdir(self.basepath) input_path = os.path.join(self.opts.input_folder, os.path.basename(image)) shutil.copy(str(image), input_path) gpu1 = self.opts.GPU ## Stage 1: Overall Quality Improve print("Running Stage 1: Overall restoration") os.chdir("./Global") stage_1_input_dir = self.opts.input_folder stage_1_output_dir = os.path.join( self.opts.output_folder, "stage_1_restore_output" ) os.makedirs(stage_1_output_dir, exist_ok=True) if not with_scratch: stage_1_command = ( "python test.py --test_mode Full --Quality_restore --test_input " + stage_1_input_dir + " --outputs_dir " + stage_1_output_dir + " --gpu_ids " + gpu1 ) run_cmd(stage_1_command) else: mask_dir = os.path.join(stage_1_output_dir, "masks") new_input = os.path.join(mask_dir, "input") new_mask = os.path.join(mask_dir, "mask") stage_1_command_1 = ( "python detection.py --test_path " + stage_1_input_dir + " --output_dir " + mask_dir + " --input_size full_size" + " --GPU " + gpu1 ) if HR: HR_suffix = " --HR" else: HR_suffix = "" stage_1_command_2 = ( "python test.py --Scratch_and_Quality_restore --test_input " + new_input + " --test_mask " + new_mask + " --outputs_dir " + stage_1_output_dir + " --gpu_ids " + gpu1 + HR_suffix ) run_cmd(stage_1_command_1) run_cmd(stage_1_command_2) ## Solve the case when there is no face in the old photo stage_1_results = os.path.join(stage_1_output_dir, "restored_image") stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output") os.makedirs(stage_4_output_dir, exist_ok=True) for x in os.listdir(stage_1_results): img_dir = os.path.join(stage_1_results, x) shutil.copy(img_dir, stage_4_output_dir) print("Finish Stage 1 ...") print("\n") ## Stage 2: Face Detection print("Running Stage 2: Face Detection") os.chdir(".././Face_Detection") stage_2_input_dir = os.path.join(stage_1_output_dir, "restored_image") stage_2_output_dir = os.path.join( self.opts.output_folder, "stage_2_detection_output" ) os.makedirs(stage_2_output_dir, exist_ok=True) stage_2_command = ( "python detect_all_dlib_HR.py --url " + stage_2_input_dir + " --save_url " + stage_2_output_dir ) run_cmd(stage_2_command) print("Finish Stage 2 ...") print("\n") ## Stage 3: Face Restore 
print("Running Stage 3: Face Enhancement") os.chdir(".././Face_Enhancement") stage_3_input_mask = "./" stage_3_input_face = stage_2_output_dir stage_3_output_dir = os.path.join( self.opts.output_folder, "stage_3_face_output" ) os.makedirs(stage_3_output_dir, exist_ok=True) self.opts.checkpoint_name = "FaceSR_512" stage_3_command = ( "python test_face.py --old_face_folder " + stage_3_input_face + " --old_face_label_folder " + stage_3_input_mask + " --tensorboard_log --name " + self.opts.checkpoint_name + " --gpu_ids " + gpu1 + " --load_size 512 --label_nc 18 --no_instance --preprocess_mode resize --batchSize 1 --results_dir " + stage_3_output_dir + " --no_parsing_map" ) run_cmd(stage_3_command) print("Finish Stage 3 ...") print("\n") ## Stage 4: Warp back print("Running Stage 4: Blending") os.chdir(".././Face_Detection") stage_4_input_image_dir = os.path.join(stage_1_output_dir, "restored_image") stage_4_input_face_dir = os.path.join(stage_3_output_dir, "each_img") stage_4_output_dir = os.path.join(self.opts.output_folder, "final_output") os.makedirs(stage_4_output_dir, exist_ok=True) stage_4_command = ( "python align_warp_back_multiple_dlib_HR.py --origin_url " + stage_4_input_image_dir + " --replace_url " + stage_4_input_face_dir + " --save_url " + stage_4_output_dir ) run_cmd(stage_4_command) print("Finish Stage 4 ...") print("\n") print("All the processing is done. Please check the results.") final_output = os.listdir(os.path.join(self.opts.output_folder, "final_output"))[0] image_restore = cv2.imread(os.path.join(self.opts.output_folder, "final_output", final_output)) out_path = Path(tempfile.mkdtemp()) / "out.png" cv2.imwrite(str(out_path), image_restore) finally: clean_folder(self.opts.input_folder) clean_folder(self.opts.output_folder) return out_path def clean_folder(folder): for filename in os.listdir(folder): file_path = os.path.join(folder, filename) try: if os.path.isfile(file_path) or os.path.islink(file_path): os.unlink(file_path) elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: print(f"Failed to delete {file_path}. Reason:{e}")
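
# Hedged usage sketch (not part of the original file; assumes the repository's
# pretrained checkpoints are in place and that "examples/old_photo.png" exists):
# outside of the cog runtime the predictor can be driven directly.
if __name__ == "__main__":
    predictor = Predictor()
    predictor.setup()                          # parse default options, create temp input/output folders
    restored_path = predictor.predict(         # returns a Path to the restored PNG
        image=Path("examples/old_photo.png"),
        HR=False,
        with_scratch=True,
    )
    print(f"Restored image written to {restored_path}")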
py
1a4478dd62d09d6009712ed7493af71b9c01a605
#!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under MIT License. # """The implementation of the Sheets.""" from typing import Any, Dict, Iterator, MutableMapping from tensorbay.dataset import Notes, RemoteData from tensorbay.label import Catalog from tensorbay.utility import URL from graviti.client import get_catalog, get_notes, list_data_details, list_segments from graviti.dataframe import DataFrame from graviti.portex import Extractors, catalog_to_schema, get_extractors from graviti.utility import LazyFactory, LazyList, NestedDict LazyLists = NestedDict[str, LazyList[Any]] class Sheets(MutableMapping[str, DataFrame]): """The basic structure of the Graviti sheets.""" _data: Dict[str, DataFrame] _dataset_id: str access_key: str url: str commit_id: str def __len__(self) -> int: return self._get_data().__len__() def __getitem__(self, key: str) -> DataFrame: return self._get_data().__getitem__(key) def __setitem__(self, key: str, value: DataFrame) -> None: self._get_data().__setitem__(key, value) def __delitem__(self, key: str) -> None: self._get_data().__delitem__(key) def __iter__(self) -> Iterator[str]: return self._get_data().__iter__() def _get_lazy_lists(self, factory: LazyFactory, extractors: Extractors) -> LazyLists: lazy_lists: LazyLists = {} for key, arguments in extractors.items(): if isinstance(arguments, tuple): lazy_lists[key] = factory.create_list(*arguments) else: lazy_lists[key] = self._get_lazy_lists(factory, arguments) return lazy_lists def _init_data(self) -> None: self._data = {} response = list_segments( self.url, self.access_key, self._dataset_id, commit=self.commit_id, ) for sheet in response["segments"]: sheet_name = sheet["name"] data_details = list_data_details( self.url, self.access_key, self._dataset_id, sheet_name, commit=self.commit_id, ) def factory_getter( offset: int, limit: int, sheet_name: str = sheet_name ) -> Dict[str, Any]: return list_data_details( self.url, self.access_key, self._dataset_id, sheet_name, commit=self.commit_id, offset=offset, limit=limit, ) factory = LazyFactory( data_details["totalCount"], 128, factory_getter, ) catalog = get_catalog( self.url, self.access_key, self._dataset_id, commit=self.commit_id, ) first_data_details = data_details["dataDetails"][0] remote_data = RemoteData.from_response_body( first_data_details, url=URL( first_data_details["url"], updater=lambda: "update is not supported currently" ), ) notes = get_notes( self.url, self.access_key, self._dataset_id, commit=self.commit_id, ) schema = catalog_to_schema( Catalog.loads(catalog["catalog"]), remote_data, Notes.loads(notes) ) lazy_lists = self._get_lazy_lists(factory, get_extractors(schema)) self._data[sheet_name] = DataFrame.from_lazy_lists(lazy_lists) def _get_data(self) -> Dict[str, DataFrame]: if not hasattr(self, "_data"): self._init_data() return self._data
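
# Hedged usage sketch (not part of the original module; the URL, access key,
# dataset id and commit id below are placeholders): once the connection
# attributes are set, Sheets behaves like a lazy mapping from sheet name to
# DataFrame, fetching data from the open API only on first access.
if __name__ == "__main__":
    sheets = Sheets()
    sheets.url = "https://api.graviti.com/"
    sheets.access_key = "Accesskey-************"
    sheets._dataset_id = "123456"
    sheets.commit_id = "main"
    for sheet_name in sheets:          # triggers the lazy _init_data() call
        print(sheet_name, sheets[sheet_name])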
py
1a447947e079c67f9380eef34bf5dee2ecb8b779
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=protected-access """Wrapper layers: layers that augment the functionality of another layer. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.eager import context from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers.recurrent import _standardize_args from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import layer_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.Wrapper') class Wrapper(Layer): """Abstract wrapper base class. Wrappers take another layer and augment it in various ways. Do not use this class as a layer, it is only an abstract base class. Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. Arguments: layer: The layer to be wrapped. """ def __init__(self, layer, **kwargs): assert isinstance(layer, Layer) self.layer = layer super(Wrapper, self).__init__(**kwargs) def build(self, input_shape=None): if not self.layer.built: self.layer.build(input_shape) self.layer.built = True self.built = True @property def activity_regularizer(self): if hasattr(self.layer, 'activity_regularizer'): return self.layer.activity_regularizer else: return None def get_config(self): config = {'layer': generic_utils.serialize_keras_object(self.layer)} base_config = super(Wrapper, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top # Avoid mutating the input dict config = copy.deepcopy(config) layer = deserialize_layer( config.pop('layer'), custom_objects=custom_objects) return cls(layer, **config) @keras_export('keras.layers.TimeDistributed') class TimeDistributed(Wrapper): """This wrapper allows to apply a layer to every temporal slice of an input. The input should be at least 3D, and the dimension of index one will be considered to be the temporal dimension. Consider a batch of 32 video samples, where each sample is a 128x128 RGB image with `channels_last` data format, across 10 timesteps. The batch input shape is `(32, 10, 128, 128, 3)`. 
You can then use `TimeDistributed` to apply a `Conv2D` layer to each of the 10 timesteps, independently: >>> inputs = tf.keras.Input(shape=(10, 128, 128, 3)) >>> conv_2d_layer = tf.keras.layers.Conv2D(64, (3, 3)) >>> outputs = tf.keras.layers.TimeDistributed(conv_2d_layer)(inputs) >>> outputs.shape TensorShape([None, 10, 126, 126, 64]) Arguments: layer: a `tf.keras.layers.Layer` instance. Call arguments: inputs: Input tensor. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the wrapped layer (only if the layer supports this argument). mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. This argument is passed to the wrapped layer (only if the layer supports this argument). Raises: ValueError: If not initialized with a `tf.keras.layers.Layer` instance. """ def __init__(self, layer, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `TimeDistributed` layer with a ' '`tf.keras.layers.Layer` instance. You passed: {input}'.format( input=layer)) super(TimeDistributed, self).__init__(layer, **kwargs) self.supports_masking = True # It is safe to use the fast, reshape-based approach with all of our # built-in Layers. self._always_use_reshape = ( layer_utils.is_builtin_layer(layer) and not getattr(layer, 'stateful', False)) def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None): """Finds non-specific dimensions in the static shapes. The static shapes are replaced with the corresponding dynamic shapes of the tensor. Arguments: init_tuple: a tuple, the first part of the output shape tensor: the tensor from which to get the (static and dynamic) shapes as the last part of the output shape start_idx: int, which indicate the first dimension to take from the static shape of the tensor int_shape: an alternative static shape to take as the last part of the output shape Returns: The new int_shape with the first part from init_tuple and the last part from either `int_shape` (if provided) or `tensor.shape`, where every `None` is replaced by the corresponding dimension from `tf.shape(tensor)`. """ # replace all None in int_shape by K.shape if int_shape is None: int_shape = K.int_shape(tensor)[start_idx:] if not any(not s for s in int_shape): return init_tuple + tuple(int_shape) shape = K.shape(tensor) int_shape = list(int_shape) for i, s in enumerate(int_shape): if not s: int_shape[i] = shape[start_idx + i] return init_tuple + tuple(int_shape) def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if len(input_shape) < 3: raise ValueError( '`TimeDistributed` Layer should be passed an `input_shape ` ' 'with at least 3 dimensions, received: ' + str(input_shape)) # Don't enforce the batch or time dimension. 
self.input_spec = InputSpec(shape=[None, None] + input_shape[2:]) child_input_shape = [input_shape[0]] + input_shape[2:] super(TimeDistributed, self).build(tuple(child_input_shape)) self.built = True def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() child_input_shape = tensor_shape.TensorShape([input_shape[0]] + input_shape[2:]) child_output_shape = self.layer.compute_output_shape(child_input_shape) if not isinstance(child_output_shape, tensor_shape.TensorShape): child_output_shape = tensor_shape.TensorShape(child_output_shape) child_output_shape = child_output_shape.as_list() timesteps = input_shape[1] return tensor_shape.TensorShape([child_output_shape[0], timesteps] + child_output_shape[1:]) def call(self, inputs, training=None, mask=None): kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training input_shape = K.int_shape(inputs) if input_shape[0] and not self._always_use_reshape: inputs, row_lengths = K.convert_inputs_if_ragged(inputs) is_ragged_input = row_lengths is not None # batch size matters, use rnn-based implementation def step(x, _): output = self.layer(x, **kwargs) return output, [] _, outputs, _ = K.rnn( step, inputs, initial_states=[], input_length=row_lengths[0] if is_ragged_input else input_shape[1], mask=mask, unroll=False) y = K.maybe_convert_to_ragged(is_ragged_input, outputs, row_lengths) else: # No batch size specified, therefore the layer will be able # to process batches of any size. # We can go with reshape-based implementation for performance. if isinstance(inputs, ragged_tensor.RaggedTensor): y = self.layer(inputs.values, **kwargs) y = ragged_tensor.RaggedTensor.from_row_lengths( y, inputs.nested_row_lengths()[0]) else: input_length = input_shape[1] if not input_length: input_length = array_ops.shape(inputs)[1] inner_input_shape = self._get_shape_tuple((-1,), inputs, 2) # Shape: (num_samples * timesteps, ...). And track the # transformation in self._input_map. inputs = array_ops.reshape(inputs, inner_input_shape) # (num_samples * timesteps, ...) if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) kwargs['mask'] = K.reshape(mask, inner_mask_shape) y = self.layer(inputs, **kwargs) # Shape: (num_samples, timesteps, ...) output_shape = self.compute_output_shape(input_shape).as_list() output_shape = self._get_shape_tuple((-1, input_length), y, 1, output_shape[2:]) y = array_ops.reshape(y, output_shape) if not context.executing_eagerly(): # Set the static shape for the result since it might be lost during # array_ops reshape, eg, some `None` dim in the result could be # inferred. y.set_shape(self.compute_output_shape(input_shape)) return y def compute_mask(self, inputs, mask=None): """Computes an output mask tensor for Embedding layer. This is based on the inputs, mask, and the inner layer. If batch size is specified: Simply return the input `mask`. (An rnn-based implementation with more than one rnn inputs is required but not supported in tf.keras yet.) Otherwise we call `compute_mask` of the inner layer at each time step. If the output mask at each time step is not `None`: (E.g., inner layer is Masking or RNN) Concatenate all of them and return the concatenation. If the output mask at each time step is `None` and the input mask is not `None`:(E.g., inner layer is Dense) Reduce the input_mask to 2 dimensions and return it. 
Otherwise (both the output mask and the input mask are `None`): (E.g., `mask` is not used at all) Return `None`. Arguments: inputs: Tensor with shape [batch size, timesteps, ...] indicating the input to TimeDistributed. If static shape information is available for "batch size", `mask` is returned unmodified. mask: Either None (indicating no masking) or a Tensor indicating the input mask for TimeDistributed. The shape can be static or dynamic. Returns: Either None (no masking), or a [batch size, timesteps, ...] Tensor with an output mask for the TimeDistributed layer with the shape beyond the second dimension being the value of the input mask shape(if the computed output mask is none), an output mask with the shape beyond the first dimension being the value of the mask shape(if mask is not None) or output mask with the shape beyond the first dimension being the value of the computed output shape. """ # cases need to call the layer.compute_mask when input_mask is None: # Masking layer and Embedding layer with mask_zero input_shape = K.int_shape(inputs) if input_shape[0] and not self._always_use_reshape or isinstance( inputs, ragged_tensor.RaggedTensor): # batch size matters, we currently do not handle mask explicitly, or if # the layer always uses reshape approach, or the input is a ragged tensor. return mask inner_mask = mask if inner_mask is not None: inner_mask_shape = self._get_shape_tuple((-1,), mask, 2) inner_mask = K.reshape(inner_mask, inner_mask_shape) inner_input_shape = self._get_shape_tuple((-1,), inputs, 2) inner_inputs = array_ops.reshape(inputs, inner_input_shape) output_mask = self.layer.compute_mask(inner_inputs, inner_mask) if output_mask is None: if mask is None: return None # input_mask is not None, and output_mask is None: # we should return a not-None mask output_mask = mask for _ in range(2, len(K.int_shape(mask))): output_mask = K.any(output_mask, axis=-1) else: # output_mask is not None. We need to reshape it input_length = input_shape[1] if not input_length: input_length = K.shape(inputs)[1] output_mask_int_shape = K.int_shape(output_mask) if output_mask_int_shape is None: # if the output_mask does not have a static shape, # its shape must be the same as mask's if mask is not None: output_mask_int_shape = K.int_shape(mask) else: output_mask_int_shape = K.compute_output_shape(input_shape)[:-1] output_mask_shape = self._get_shape_tuple( (-1, input_length), output_mask, 1, output_mask_int_shape[1:]) output_mask = K.reshape(output_mask, output_mask_shape) return output_mask @keras_export('keras.layers.Bidirectional') class Bidirectional(Wrapper): """Bidirectional wrapper for RNNs. Arguments: layer: `keras.layers.RNN` instance, such as `keras.layers.LSTM` or `keras.layers.GRU`. It could also be a `keras.layers.Layer` instance that meets the following criteria: 1. Be a sequence-processing layer (accepts 3D+ inputs). 2. Have a `go_backwards`, `return_sequences` and `return_state` attribute (with the same semantics as for the `RNN` class). 3. Have an `input_spec` attribute. 4. Implement serialization via `get_config()` and `from_config()`. Note that the recommended way to create new RNN layers is to write a custom RNN cell and use it with `keras.layers.RNN`, instead of subclassing `keras.layers.Layer` directly. merge_mode: Mode by which outputs of the forward and backward RNNs will be combined. One of {'sum', 'mul', 'concat', 'ave', None}. If None, the outputs will not be combined, they will be returned as a list. Default value is 'concat'. 
backward_layer: Optional `keras.layers.RNN`, or `keras.layers.Layer` instance to be used to handle backwards input processing. If `backward_layer` is not provided, the layer instance passed as the `layer` argument will be used to generate the backward layer automatically. Note that the provided `backward_layer` layer should have properties matching those of the `layer` argument, in particular it should have the same values for `stateful`, `return_states`, `return_sequence`, etc. In addition, `backward_layer` and `layer` should have different `go_backwards` argument values. A `ValueError` will be raised if these requirements are not met. Call arguments: The call arguments for this layer are the same as those of the wrapped RNN layer. Beware that when passing the `initial_state` argument during the call of this layer, the first half in the list of elements in the `initial_state` list will be passed to the forward RNN call and the last half in the list of elements will be passed to the backward RNN call. Raises: ValueError: 1. If `layer` or `backward_layer` is not a `Layer` instance. 2. In case of invalid `merge_mode` argument. 3. If `backward_layer` has mismatched properties compared to `layer`. Examples: ```python model = Sequential() model.add(Bidirectional(LSTM(10, return_sequences=True), input_shape=(5, 10))) model.add(Bidirectional(LSTM(10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') # With custom backward layer model = Sequential() forward_layer = LSTM(10, return_sequences=True) backward_layer = LSTM(10, activation='relu', return_sequences=True, go_backwards=True) model.add(Bidirectional(forward_layer, backward_layer=backward_layer, input_shape=(5, 10))) model.add(Dense(5)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop') ``` """ def __init__(self, layer, merge_mode='concat', weights=None, backward_layer=None, **kwargs): if not isinstance(layer, Layer): raise ValueError( 'Please initialize `Bidirectional` layer with a ' '`Layer` instance. You passed: {input}'.format(input=layer)) if backward_layer is not None and not isinstance(backward_layer, Layer): raise ValueError('`backward_layer` need to be a `Layer` instance. ' 'You passed: {input}'.format(input=backward_layer)) if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]: raise ValueError('Invalid merge mode. ' 'Merge mode should be one of ' '{"sum", "mul", "ave", "concat", None}') # We don't want to track `layer` since we're already tracking the two copies # of it we actually run. self._setattr_tracking = False super(Bidirectional, self).__init__(layer, **kwargs) self._setattr_tracking = True # Recreate the forward layer from the original layer config, so that it will # not carry over any state from the layer. self.forward_layer = self._recreate_layer_from_config(layer) if backward_layer is None: self.backward_layer = self._recreate_layer_from_config( layer, go_backwards=True) else: self.backward_layer = backward_layer # Keep the custom backward layer config, so that we can save it later. The # layer's name might be updated below with prefix 'backward_', and we want # to preserve the original config. 
self._backward_layer_config = generic_utils.serialize_keras_object( backward_layer) self.forward_layer._name = 'forward_' + self.forward_layer.name self.backward_layer._name = 'backward_' + self.backward_layer.name self._verify_layer_config() def force_zero_output_for_mask(layer): # Force the zero_output_for_mask to be True if returning sequences. if getattr(layer, 'zero_output_for_mask', None) is not None: layer.zero_output_for_mask = layer.return_sequences force_zero_output_for_mask(self.forward_layer) force_zero_output_for_mask(self.backward_layer) self.merge_mode = merge_mode if weights: nw = len(weights) self.forward_layer.initial_weights = weights[:nw // 2] self.backward_layer.initial_weights = weights[nw // 2:] self.stateful = layer.stateful self.return_sequences = layer.return_sequences self.return_state = layer.return_state self.supports_masking = True self._trainable = True self._num_constants = 0 self.input_spec = layer.input_spec def _verify_layer_config(self): """Ensure the forward and backward layers have valid common property.""" if self.forward_layer.go_backwards == self.backward_layer.go_backwards: raise ValueError('Forward layer and backward layer should have different ' '`go_backwards` value.') common_attributes = ('stateful', 'return_sequences', 'return_state') for a in common_attributes: forward_value = getattr(self.forward_layer, a) backward_value = getattr(self.backward_layer, a) if forward_value != backward_value: raise ValueError( 'Forward layer and backward layer are expected to have the same ' 'value for attribute {attr}, got {forward} and {backward}'.format( attr=a, forward=forward_value, backward=backward_value)) def _recreate_layer_from_config(self, layer, go_backwards=False): # When recreating the layer from its config, it is possible that the layer # is a RNN layer that contains custom cells. In this case we inspect the # layer and pass the custom cell class as part of the `custom_objects` # argument when calling `from_config`. # See https://github.com/tensorflow/tensorflow/issues/26581 for more detail. 
config = layer.get_config() if go_backwards: config['go_backwards'] = not config['go_backwards'] if 'custom_objects' in tf_inspect.getfullargspec( layer.__class__.from_config).args: custom_objects = {} cell = getattr(layer, 'cell', None) if cell is not None: custom_objects[cell.__class__.__name__] = cell.__class__ # For StackedRNNCells stacked_cells = getattr(cell, 'cells', []) for c in stacked_cells: custom_objects[c.__class__.__name__] = c.__class__ return layer.__class__.from_config(config, custom_objects=custom_objects) else: return layer.__class__.from_config(config) @tf_utils.shape_type_conversion def compute_output_shape(self, input_shape): output_shape = self.forward_layer.compute_output_shape(input_shape) if not isinstance(output_shape, tensor_shape.TensorShape): output_shape = tensor_shape.TensorShape(output_shape) output_shape = tuple(output_shape.as_list()) if self.return_state: state_shape = output_shape[1:] output_shape = output_shape[0] if self.merge_mode == 'concat': output_shape = list(output_shape) output_shape[-1] *= 2 output_shape = tuple(output_shape) elif self.merge_mode is None: output_shape = [output_shape, copy.copy(output_shape)] if self.return_state: if self.merge_mode is None: return output_shape + state_shape + copy.copy(state_shape) return [output_shape] + state_shape + copy.copy(state_shape) return output_shape def __call__(self, inputs, initial_state=None, constants=None, **kwargs): """`Bidirectional.__call__` implements the same API as the wrapped `RNN`.""" inputs, initial_state, constants = _standardize_args( inputs, initial_state, constants, self._num_constants) if isinstance(inputs, list): if len(inputs) > 1: initial_state = inputs[1:] inputs = inputs[0] if initial_state is None and constants is None: return super(Bidirectional, self).__call__(inputs, **kwargs) # Applies the same workaround as in `RNN.__call__` additional_inputs = [] additional_specs = [] if initial_state is not None: # Check if `initial_state` can be splitted into half num_states = len(initial_state) if num_states % 2 > 0: raise ValueError( 'When passing `initial_state` to a Bidirectional RNN, ' 'the state should be a list containing the states of ' 'the underlying RNNs. 
' 'Found: ' + str(initial_state)) kwargs['initial_state'] = initial_state additional_inputs += initial_state state_specs = [InputSpec(shape=K.int_shape(state)) for state in initial_state] self.forward_layer.state_spec = state_specs[:num_states // 2] self.backward_layer.state_spec = state_specs[num_states // 2:] additional_specs += state_specs if constants is not None: kwargs['constants'] = constants additional_inputs += constants constants_spec = [InputSpec(shape=K.int_shape(constant)) for constant in constants] self.forward_layer.constants_spec = constants_spec self.backward_layer.constants_spec = constants_spec additional_specs += constants_spec self._num_constants = len(constants) self.forward_layer._num_constants = self._num_constants self.backward_layer._num_constants = self._num_constants is_keras_tensor = K.is_keras_tensor(additional_inputs[0]) for tensor in additional_inputs: if K.is_keras_tensor(tensor) != is_keras_tensor: raise ValueError('The initial state of a Bidirectional' ' layer cannot be specified with a mix of' ' Keras tensors and non-Keras tensors' ' (a "Keras tensor" is a tensor that was' ' returned by a Keras layer, or by `Input`)') if is_keras_tensor: # Compute the full input spec, including state full_input = [inputs] + additional_inputs # The original input_spec is None since there could be a nested tensor # input. Update the input_spec to match the inputs. full_input_spec = [None for _ in range(len(nest.flatten(inputs))) ] + additional_specs # Removing kwargs since the value are passed with input list. kwargs['initial_state'] = None kwargs['constants'] = None # Perform the call with temporarily replaced input_spec original_input_spec = self.input_spec self.input_spec = full_input_spec output = super(Bidirectional, self).__call__(full_input, **kwargs) self.input_spec = original_input_spec return output else: return super(Bidirectional, self).__call__(inputs, **kwargs) def call(self, inputs, training=None, mask=None, initial_state=None, constants=None): """`Bidirectional.call` implements the same API as the wrapped `RNN`.""" kwargs = {} if generic_utils.has_arg(self.layer.call, 'training'): kwargs['training'] = training if generic_utils.has_arg(self.layer.call, 'mask'): kwargs['mask'] = mask if generic_utils.has_arg(self.layer.call, 'constants'): kwargs['constants'] = constants if generic_utils.has_arg(self.layer.call, 'initial_state'): if isinstance(inputs, list) and len(inputs) > 1: # initial_states are keras tensors, which means they are passed in # together with inputs as list. The initial_states need to be split into # forward and backward section, and be feed to layers accordingly. forward_inputs = [inputs[0]] backward_inputs = [inputs[0]] pivot = (len(inputs) - self._num_constants) // 2 + 1 # add forward initial state forward_inputs += inputs[1:pivot] if not self._num_constants: # add backward initial state backward_inputs += inputs[pivot:] else: # add backward initial state backward_inputs += inputs[pivot:-self._num_constants] # add constants for forward and backward layers forward_inputs += inputs[-self._num_constants:] backward_inputs += inputs[-self._num_constants:] forward_state, backward_state = None, None if 'constants' in kwargs: kwargs['constants'] = None elif initial_state is not None: # initial_states are not keras tensors, eg eager tensor from np array. # They are only passed in from kwarg initial_state, and should be passed # to forward/backward layer via kwarg initial_state as well. 
forward_inputs, backward_inputs = inputs, inputs half = len(initial_state) // 2 forward_state = initial_state[:half] backward_state = initial_state[half:] else: forward_inputs, backward_inputs = inputs, inputs forward_state, backward_state = None, None y = self.forward_layer(forward_inputs, initial_state=forward_state, **kwargs) y_rev = self.backward_layer(backward_inputs, initial_state=backward_state, **kwargs) else: y = self.forward_layer(inputs, **kwargs) y_rev = self.backward_layer(inputs, **kwargs) if self.return_state: states = y[1:] + y_rev[1:] y = y[0] y_rev = y_rev[0] if self.return_sequences: time_dim = 0 if getattr(self.forward_layer, 'time_major', False) else 1 y_rev = K.reverse(y_rev, time_dim) if self.merge_mode == 'concat': output = K.concatenate([y, y_rev]) elif self.merge_mode == 'sum': output = y + y_rev elif self.merge_mode == 'ave': output = (y + y_rev) / 2 elif self.merge_mode == 'mul': output = y * y_rev elif self.merge_mode is None: output = [y, y_rev] else: raise ValueError( 'Unrecognized value for `merge_mode`: %s' % (self.merge_mode)) if self.return_state: if self.merge_mode is None: return output + states return [output] + states return output def reset_states(self): self.forward_layer.reset_states() self.backward_layer.reset_states() def build(self, input_shape): with K.name_scope(self.forward_layer.name): self.forward_layer.build(input_shape) with K.name_scope(self.backward_layer.name): self.backward_layer.build(input_shape) self.built = True def compute_mask(self, inputs, mask): if isinstance(mask, list): mask = mask[0] if self.return_sequences: if not self.merge_mode: output_mask = [mask, mask] else: output_mask = mask else: output_mask = [None, None] if not self.merge_mode else None if self.return_state: states = self.forward_layer.states state_mask = [None for _ in states] if isinstance(output_mask, list): return output_mask + state_mask * 2 return [output_mask] + state_mask * 2 return output_mask @property def constraints(self): constraints = {} if hasattr(self.forward_layer, 'constraints'): constraints.update(self.forward_layer.constraints) constraints.update(self.backward_layer.constraints) return constraints def get_config(self): config = {'merge_mode': self.merge_mode} if self._num_constants: config['num_constants'] = self._num_constants if hasattr(self, '_backward_layer_config'): config['backward_layer'] = self._backward_layer_config base_config = super(Bidirectional, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): # Instead of updating the input, create a copy and use that. config = copy.deepcopy(config) num_constants = config.pop('num_constants', 0) # Handle forward layer instantiation (as would parent class). from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top config['layer'] = deserialize_layer( config['layer'], custom_objects=custom_objects) # Handle (optional) backward layer instantiation. backward_layer_config = config.pop('backward_layer', None) if backward_layer_config is not None: backward_layer = deserialize_layer( backward_layer_config, custom_objects=custom_objects) config['backward_layer'] = backward_layer # Instantiate the wrapper, adjust it and return it. layer = cls(**config) layer._num_constants = num_constants return layer
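
# Hedged usage sketch (not part of the upstream module): passing `initial_state`
# to a Bidirectional LSTM. As described in `Bidirectional.__call__` above, the
# first half of the list feeds the forward layer and the second half feeds the
# backward layer. Shapes and layer sizes below are arbitrary examples.
if __name__ == '__main__':
  import numpy as np
  import tensorflow as tf

  inputs = tf.keras.Input(shape=(5, 10))
  states = [tf.keras.Input(shape=(4,)) for _ in range(4)]  # [fwd_h, fwd_c, bwd_h, bwd_c]
  outputs = Bidirectional(tf.keras.layers.LSTM(4))(inputs, initial_state=states)
  model = tf.keras.Model([inputs] + states, outputs)

  x = [np.random.random((2, 5, 10))] + [np.random.random((2, 4)) for _ in range(4)]
  print(model.predict(x).shape)  # (2, 8) with the default 'concat' merge mode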
py
1a44796606110a518716a98c7770c98ffb07a8a2
"""Ajout vigilance meteo Revision ID: 901a31d192ad Revises: dcffac33e4fd Create Date: 2021-11-26 16:35:51.243300 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '901a31d192ad' down_revision = 'dcffac33e4fd' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('vigilance_meteo', sa.Column('id', sa.Integer(), nullable=False), sa.Column('zone_id', sa.Integer(), nullable=True), sa.Column('phenomene_id', sa.Integer(), nullable=True), sa.Column('date_export', sa.DateTime(), nullable=True), sa.Column('couleur_id', sa.Integer(), nullable=True), sa.Column('validity', postgresql.TSTZRANGE(), nullable=False), sa.Column('to_show', postgresql.DATERANGE(), nullable=False), sa.ForeignKeyConstraint(['zone_id'], ['indice_schema.zone.id'], ), sa.PrimaryKeyConstraint('id'), schema='indice_schema' ) op.create_index('vigilance_zone_phenomene_date_export_idx', 'vigilance_meteo', ['zone_id', 'phenomene_id', 'date_export'], unique=False, schema='indice_schema') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_index('vigilance_zone_phenomene_date_export_idx', table_name='vigilance_meteo', schema='indice_schema') op.drop_table('vigilance_meteo', schema='indice_schema') # ### end Alembic commands ###
py
1a447efc183811bf00b95754bb571d4497a88073
""" [PYTHON NAMING CONVENTION] module_name, package_name, ClassName, method_name, ExceptionName, function_name, GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name. """ import sys, os import cv2 import re import pprint import numpy as np import time, datetime import pickle from modules.utils import ( my_print, quaternion2euler, camel2snake, snake2camel, MyVideo, str2float) from modules.constants import Constants try: import mujoco_py as mjPy except ImportError as e: raise error.DependencyNotInstalled( "{}. (HINT: you need to install mujoco_py, \ and also perform the setup instructions here: \ https://github.com/openai/mujoco-py/.)".format( e ) ) # from mujoco_py import class Simulation( ): """ Running a single Whip Simulation [INHERITANCE] [DESCRIPTION] [NOTE] All of the model files are saved in "models" directory, and we are using "relative directory" to generate and find the .xml model file. Hence do not change of "model directory" variable within this """ MODEL_DIR = Constants.MODEL_DIR SAVE_DIR = Constants.SAVE_DIR VISUALIZE = True current_time = 0 controller = None # Control input function def __init__( self, model_name = None, is_visualize = True, arg_parse = None ): """ Default constructor of THIS class [ARGUMENTS] [NAME] [TYPE] [DESCRIPTION] (1) model_name string The xml model file name for running the MuJoCo simulation. (2) is_visualized boolean Turn ON/OFF the mjViewer (visualizer) of the simulation. This flag is useful when optimizing a simulation. (3) arg_parse dictionary Dictionary which contains all the arguments given to the main `run.py` script. """ if model_name is None: self.mjModel = None self.mjSim = None self.mjData = None self.mjViewer = None self.args = arg_parse my_print( WARNING = "MODEL FILE NOT GIVEN, PLEASE INPUT XML MODEL FILE WITH `attach_model` MEMBER FUNCTION" ) else: # If model_name is given, then check if there exist ".xml" at the end, if not, append model_name = model_name + ".xml" if model_name[ -4: ] != ".xml" else model_name self.model_name = model_name # Based on the model_name, construct the simulation. self.mjModel = mjPy.load_model_from_path( self.MODEL_DIR + model_name ) # Loading xml model as and save it as "model" self.mjSim = mjPy.MjSim( self.mjModel ) # Construct the simulation environment and save it as "sim" self.mjData = self.mjSim.data # Construct the basic MuJoCo data and save it as "mjData" self.mjViewer = mjPy.MjViewerBasic( self.mjSim ) if is_visualize else None # Construct the basic MuJoCo viewer and save it as "myViewer" self.args = arg_parse # Saving the default simulation variables self.fps = 60 # Frames per second for the mujoco render self.dt = self.mjModel.opt.timestep # Time step of the simulation [sec] self.sim_step = 0 # Number of steps of the simulation, in integer [-] self.update_rate = round( 1 / self.dt / self.fps ) # 1/dt = number of steps N for 1 second simulaiton, dividing this with frames-per-second (fps) gives us the frame step to be updated. 
self.g = self.mjModel.opt.gravity # Calling the gravity vector of the simulation environment # Saving additional model parameters for multiple purposes self.act_names = self.mjModel.actuator_names self.geom_names = self.mjModel.geom_names self.idx_geom_names = [ self.mjModel._geom_name2id[ name ] for name in self.geom_names ] self.n_acts = len( self.mjModel.actuator_names ) self.n_limbs = '-'.join( self.mjModel.body_names ).lower().count( 'arm' ) self.run_time = float( self.args[ 'runTime' ] ) # Run time of the total simulation self.start_time = float( self.args[ 'startTime' ] ) # Start time of the movements self.VISUALIZE = is_visualize # saving the VISUALIZE Flag def attach_model( self, model_name ): if self.mjModel is not None: my_print( WARNING = "MODEL FILE EXIST! OVERWRITTING THE WHOLE MUJOCO FILE" ) self.__init__( model_name ) def attach_controller( self, controller_name ): """ Attaching the controller object for running the simulation. For detailed controller description, please check "controllers.py" """ self.controller = controller_name def set_initial_condition( self ): """ Manually setting the initial condition of the system. """ if "_w_" in self.model_name: # If whip is attached to the model. tmp = self.mjData.get_body_xquat( "node1" ) # Getting the quaternion angle of the whip handle yaw, pitch, roll = quaternion2euler( tmp ) self.mjData.qpos[ self.n_acts ] = - roll # Setting the handle posture to make the whip being straight down at equilibrium. self.mjData.qpos[ self.n_acts + 1 ] = + pitch # Setting the handle posture to make the whip being straight down at equilibrium. self.mjSim.forward() # Running the forward kinematics, or setting the model as the given qpos WITHOUT proceeding the time step. Therefore no simulation time step is executed. def run( self ): """ Running a single simulation. [INPUT] [VAR NAME] [TYPE] [DESCRIPTION] (1) run_time float The whole run time of the simulation. (2) ctrl_start_time float """ # Check if mjModel or mjSim is empty and raise error if self.mjModel is None or self.mjSim is None: raise ValueError( "mjModel and mjSim is Empty! Add it before running simulation" ) # Warn the user if input and output function is empty if self.controller is None: raise ValueError( "CONTROLLER NOT ATTACHED TO SIMULATION. \ PLEASE REFER TO METHOD 'attach_output_function' and 'attach_controller' " ) if self.args[ 'recordVideo' ]: vid = MyVideo( fps = self.fps * float( self.args[ 'vidRate' ] ), vid_dir = self.args[ 'saveDir' ] ) # If args doesn't have saveDir attribute, save vid_dir as None if self.args[ 'saveData' ]: file = open( self.args[ 'saveDir' ] + "data_log.txt", "w+" ) # Setting the camera position for the simulation # [camParameters]: [ 0.17051, 0.21554, -0.82914, 2.78528,-30.68421,162.42105 ] # [camParameters]: [ -0.10325, 0. , -2.51498, 7.278 ,-45. , 90. ] if self.args[ 'camPos' ] is not None: tmp = str2float( self.args[ 'camPos' ] ) self.mjViewer.cam.lookat[ 0:3 ] = tmp[ 0 : 3 ] self.mjViewer.cam.distance = tmp[ 3 ] self.mjViewer.cam.elevation = tmp[ 4 ] self.mjViewer.cam.azimuth = tmp[ 5 ] self.set_initial_condition( ) # Setting initial condition. 
Some specific controllers need to specify the initial condition while self.current_time <= self.run_time: if self.sim_step % self.update_rate == 0: if self.mjViewer is not None: self.mjViewer.render( ) # Render the simulation my_print( currentTime = self.current_time, a = self.controller.a ) if self.args[ 'verbose' ]: my_print( camParameters = [ self.mjViewer.cam.lookat[ 0 ], self.mjViewer.cam.lookat[ 1 ], self.mjViewer.cam.lookat[ 2 ], self.mjViewer.cam.distance, self.mjViewer.cam.elevation, self.mjViewer.cam.azimuth ] ) if self.args[ 'recordVideo' ]: vid.write( self.mjViewer ) if self.args[ 'saveData' ]: my_print( currentTime = self.current_time, jointAngleActual = self.mjData.qpos[ : ], geomXYZPositions = self.mjData.geom_xpos[ self.idx_geom_names ], desiredTrajectory = self.controller.traj_pos[ : ], trajectoryError = self.controller.traj_pos[ : ] - self.mjData.get_geom_xpos( "EEGEOM" ) if self.controller.type == 2 else self.controller.traj_pos[ : ] - self.mjData.qpos[ : ], file = file ) # [input controller] # input_ref: The data array that are aimed to be inputted (e.g., qpos, qvel, qctrl etc.) # input_idx: The specific index of input_ref data array that should be inputted # input: The actual input value which is inputted to input_ref input_ref, input_idx, input = self.controller.input_calc( self.start_time, self.current_time ) input_ref[ input_idx ] = input self.mjSim.step( ) # Single step update if( self.is_sim_unstable() ): # Check if simulation is stable # If not optimization, and result unstable, then save the detailed data print( "[WARNING] UNSTABLE SIMULATION, HALTED AT {0:f} for at {1:f}".format( self.current_time, self.run_time ) ) if self.args[ 'saveData' ]: print( "[WARNING] UNSTABLE SIMULATION, HALTED AT {0:f} for at {1:f}".format( self.current_time, self.run_time ), file = file ) file.close( ) break self.current_time = self.mjData.time # Update the current_time variable of the simulation if self.sim_step % self.update_rate == 0: my_print( trajectoryError = self.controller.traj_pos[ : ] - self.mjData.get_geom_xpos( "EEGEOM" ) if self.controller.type == 2 else self.controller.traj_pos[ : ] - self.mjData.qpos[ : ],) if self.args[ 'saveData' ]: # Saving all the necessary datas for the simulation my_print( inputVal = input, file = file ) self.sim_step += 1 if self.args[ 'recordVideo' ]: vid.release( ) # If simulation is finished, wrap-up the video file. if self.args[ 'saveData' ]: file.close() def save_simulation_data( self, dir ): """ Save all the details of the controller parameters, inputs and output of the simulation """ if dir is not None and dir[ -1 ] != "/": # Quick Check of whether result_dir has backslash "/" at the end dir += "/" # Append the backslash # [TIP] [MOSES] # By using the "with" function you don't need to call f.close( ), the file will automatically close the opened file. # [REF] https://lerner.co.il/2015/01/18/dont-use-python-close-files-answer-depends/ with open( dir + "simulation_details.txt", "w+" ) as f: pprint.pprint( self.controller.__dict__, f ) # Using pretty-print (pprint) to flush out the data in a much readable format print( self.args , file = f ) # Flushing out all the arguments detail. def is_sim_unstable( self ): thres = 1 * 10 ** 6 if ( max( np.absolute( self.mjData.qpos ) ) > thres ) or \ ( max( np.absolute( self.mjData.qvel ) ) > thres ) or \ ( max( np.absolute( self.mjData.qacc ) ) > thres ): return True else: return False def reset( self ): """ Reseting the mujoco simulation """ self.current_time = 0 self.sim_step = 0 self.mjSim.reset( )
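
# Hedged usage sketch (not part of the original module; the model name and the
# commented-out controller are placeholders -- an actual controller class from
# "controllers.py" must be attached before run( ) will work).
if __name__ == "__main__":

    args = { "runTime": "4.0", "startTime": "0.1", "vidRate": "1.0",
             "recordVideo": False, "saveData": False, "verbose": False,
             "camPos": None, "saveDir": Constants.SAVE_DIR }

    sim = Simulation( model_name = "example_model_w_whip", is_visualize = False, arg_parse = args )

    # ctrl = SomeController( sim.mjModel, sim.mjData )    # hypothetical controller from controllers.py
    # sim.attach_controller( ctrl )
    # sim.run( )
    # sim.save_simulation_data( Constants.SAVE_DIR )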
py
1a447fe3914369392f9a2c9f12c41c4ee8bb75b4
#!/usr/bin/env python # coding: utf-8 # Author: Arne Neumann <[email protected]> from tempfile import NamedTemporaryFile from lxml import etree from rstconverter.tree import debug_root_label, DGParentedTree, t import rstconverter as rstc EXPECTED_SVG_TREE = """<?xml version="1.0" encoding="utf-8" ?> <svg baseProfile="full" height="72px" preserveAspectRatio="xMidYMid meet" style="font-family: times, serif; font-weight:normal; font-style: normal; font-size: 16px;" version="1.1" viewBox="0,0,80.0,72.0" width="80px" xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" xmlns:xlink="http://www.w3.org/1999/xlink"><defs /><svg width="100%" x="0" y="0em"><defs /><text text-anchor="middle" x="50%" y="1em">foo</text></svg><svg width="50%" x="0%" y="3em"><defs /><svg width="100%" x="0" y="0em"><defs /><text text-anchor="middle" x="50%" y="1em">bar</text></svg></svg><line stroke="black" x1="50%" x2="25%" y1="1.2em" y2="3em" /><svg width="50%" x="50%" y="3em"><defs /><svg width="100%" x="0" y="0em"><defs /><text text-anchor="middle" x="50%" y="1em">baz</text></svg></svg><line stroke="black" x1="50%" x2="75%" y1="1.2em" y2="3em" /></svg>""" def test_t(): assert t("", []) == DGParentedTree("", []) assert t("") == DGParentedTree("", []) assert t("foo", []) == DGParentedTree("foo", []) assert t("foo") == DGParentedTree("foo", []) assert t("foo", ["bar"]) == DGParentedTree("foo", ["bar"]) assert t("foo", ["bar", "baz"]) == DGParentedTree("foo", ["bar", "baz"]) def test_debug_root_label(): label = 'Foo' node_id = '21' assert debug_root_label(label, debug=False, root_id=None) == label assert debug_root_label(label, debug=False, root_id=node_id) == label assert debug_root_label(label, debug=True, root_id=None) == label assert debug_root_label(label, debug=True, root_id=node_id) == "Foo (21)" def test_write_svgtree(): """A ParentedTree can be converted into an SVG image using svgling.""" tree = DGParentedTree("foo", ["bar", "baz"]) # write SVG to file temp_file = NamedTemporaryFile() temp_file.close() rstc.write_svgtree(tree, temp_file.name) with open(temp_file.name, 'r') as svg_file: assert EXPECTED_SVG_TREE == svg_file.read() # return SVG as string assert EXPECTED_SVG_TREE == rstc.write_svgtree(tree)
py
1a448146522a5d893d07c24a1cb7b1e22c2471e6
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from .. import models class SkusOperations(object): """SkusOperations operations. You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client API version. Constant value: "2019-11-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2019-11-01" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): """Get the list of StorageCache.Cache SKUs available to this subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of ResourceSku :rtype: ~azure.mgmt.storagecache.models.ResourceSkuPaged[~azure.mgmt.storagecache.models.ResourceSku] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters, header_parameters) return request def internal_paging(next_link=None): request = prepare_request(next_link) response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response header_dict = None if raw: header_dict = {} deserialized = models.ResourceSkuPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.StorageCache/skus'}
py
1a448178e1dc2cc107a98ae04ba50d4e73a24a36
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Email, EqualTo
from ..models import User
from wtforms import ValidationError


class LoginForm(FlaskForm):
    email = StringField('Your Email Address', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember = BooleanField('Remember me')
    submit = SubmitField('Sign In')


class RegistrationForm(FlaskForm):
    email = StringField('Your Email Address', validators=[Required(), Email()])
    username = StringField('Enter your username', validators=[Required()])
    password = PasswordField('Password', validators=[
        Required(), EqualTo('password_confirm', message='Passwords must match')])
    password_confirm = PasswordField('Confirm Passwords', validators=[Required()])
    submit = SubmitField('Sign Up')

    def validate_email(self, data_field):
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError('There is an account with that email')

    def validate_username(self, data_field):
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError('That username is taken')
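
# Usage sketch, assuming a typical Flask view; the blueprint/route names, the User constructor
# signature and the db session handling below are illustrative guesses, not code from this app.
# Flask-WTF runs the declared field validators and then the inline validate_email /
# validate_username methods above (matched by field name) when form.validate_on_submit() is called.
#
# @auth.route('/register', methods=['GET', 'POST'])
# def register():
#     form = RegistrationForm()
#     if form.validate_on_submit():
#         user = User(email=form.email.data,
#                     username=form.username.data,
#                     password=form.password.data)
#         db.session.add(user)
#         db.session.commit()
#         return redirect(url_for('auth.login'))
#     return render_template('auth/register.html', registration_form=form)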
py
1a4481efb168930bab2059ecd05b4a272a2fe6a9
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#


"""
Check that tuple conversions work as expected
"""


def test():
    import pyre.descriptors

    # create a descriptor
    descriptor = pyre.descriptors.array()

    # casts
    # successful
    assert () == descriptor.coerce(())
    assert () == descriptor.coerce([])
    assert () == descriptor.coerce("()")
    assert () == descriptor.coerce("[]")
    assert (1,) == descriptor.coerce((1,))
    assert (1,) == descriptor.coerce([1])
    assert (1,) == descriptor.coerce("[1]")
    assert (1,) == descriptor.coerce("(1,)")
    assert (1, 2) == descriptor.coerce((1, 2))
    assert (1, 2) == descriptor.coerce([1, 2])
    assert (1, 2) == descriptor.coerce("(1,2)")
    assert (1, 2) == descriptor.coerce("[1,2]")
    assert (1, 2) == descriptor.coerce("(1, 2)")
    assert (1, 2) == descriptor.coerce("[1, 2]")

    # failures
    try:
        descriptor.coerce(test)
        assert False
    except descriptor.CastingError as error:
        pass

    return


# main
if __name__ == "__main__":
    # skip pyre initialization since we don't rely on the executive
    pyre_noboot = True
    # do...
    test()


# end of file
py
1a448297a1e8fa79fc62ce0138b457ca48504fa7
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, TYPE_CHECKING from azure.core.configuration import Configuration from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy from .._version import VERSION if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential class ResourceManagementClientConfiguration(Configuration): """Configuration for ResourceManagementClient. Note that all parameters used to create this instance are saved as instance attributes. :param credential: Credential needed for the client to connect to Azure. :type credential: ~azure.core.credentials_async.AsyncTokenCredential :param subscription_id: The ID of the target subscription. :type subscription_id: str """ def __init__( self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any ) -> None: super(ResourceManagementClientConfiguration, self).__init__(**kwargs) if credential is None: raise ValueError("Parameter 'credential' must not be None.") if subscription_id is None: raise ValueError("Parameter 'subscription_id' must not be None.") self.credential = credential self.subscription_id = subscription_id self.api_version = "2019-07-01" self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION)) self._configure(**kwargs) def _configure( self, **kwargs: Any ) -> None: self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs) self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs) self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs) self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs) self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs) self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs) self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs) self.authentication_policy = kwargs.get('authentication_policy') if self.credential and not self.authentication_policy: self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
py
1a4482c10ed18b1e0d66df3434bd14ff1b65ef6b
import math import mpmath import numpy as np from PIL import Image import os class EvenDimensionError(Exception): # The required resolution for the image is a square with a center pixel that # has the same number of pixels to the left, to the right, above, and # underneath, which precludes any even number of pixels. pass class NumberError(Exception): # For a test of prime numbers, to make sure the number is a non-negative # integer. pass class UlamSpiral: ''' Makes Ulam spirals of any arbitrary sequence. A True/False function is required for each sequence. ''' SIDE = None # The length of the square. image_array = [] # Pixel color data. # If the PNG directory is not made, make it. IMAGE_DIR = os.path.dirname(os.path.realpath(__file__)) IMAGE_DIR = os.path.join(IMAGE_DIR, 'PNG') if not os.path.exists(IMAGE_DIR): os.makedirs(IMAGE_DIR) COLORS = None # Color palettes. is_prime_list = [] # A list for primes so recalculating is not necessary. is_not_prime_list = [] # To avoid recalculation. image_size = 1600 # For scaling the final image. image_size = (image_size, image_size) # The final image resolution. DIGITS_OF_PI = None # To store digits of pi. def __init__(self, sides=[41], modes=['prime'], colors=[[255, 255, 255]], debug_tests=False): ''' Make a basic introduction of the image essentails and specific initializations for sequences that require it. ''' if 'a037003' in modes or debug_tests: self.bake_pi(max(sides)**2+1) if 'a050704' in modes: for x in range(max(sides)**2+1): is_prime = self.is_prime(x) if x%1000 == 0: print('primes ' + str(x) + ' of ' + str(max(sides)**2)) if debug_tests: self.debug_tests() for i, mode in enumerate(modes): self.COLORS = colors[i%len(colors)] for side in sides: if side%2 == 0: raise EvenDimensionError self.SIDE = side self.CENTER = int((self.SIDE-1)/2+1) self.CENTER = [self.CENTER, self.CENTER] self.IMAGE_F = str(mode) + ' {:,}'.format(self.SIDE**2) + \ '.png' self.IMAGE_PATH = os.path.join(self.IMAGE_DIR, self.IMAGE_F) self.image_array = np.zeros((self.SIDE, self.SIDE, 3), \ dtype=np.uint8) self.calc_pixels(mode) self.write_image(mode) def calc_pixels(self, mode): ''' Follow the path requirements of an Ulam spiral. With each step, test the pixel for the sequence. ''' cursor = self.CENTER.copy() # The center pixel. # For the color palettes, keep count the concentric squares. That number # decides, for a pixel who passes the sequence test, which color from # the palettes to choose. num_square = 0 for x in range(1, self.SIDE**2+1): if x%100 == 0: print(str(mode) + ' {:,}'.format(x) + ' of ' + \ '{:,}'.format(self.SIDE**2)) if x > 1: if cursor == [self.CENTER[0]+num_square, self.CENTER[1]+num_square]: cursor[1] += 1 num_square += 1 elif cursor == [self.CENTER[0]+num_square, self.CENTER[1]-num_square]: cursor[1] += 1 elif cursor == [self.CENTER[0]-num_square, self.CENTER[1]-num_square]: cursor[0] += 1 elif cursor == [self.CENTER[0]-num_square, self.CENTER[1]+num_square]: cursor[1] -= 1 elif cursor[1] == self.CENTER[0]+num_square: cursor[0] -= 1 elif cursor[0] == self.CENTER[1]-num_square: cursor[1] -= 1 elif cursor[1] == self.CENTER[0]-num_square: cursor[0] += 1 elif cursor[0] == self.CENTER[1]+num_square: cursor[1] += 1 self.test_pixel(cursor, num_square, x, mode) def test_pixel(self, cursor, num_square, x, mode): ''' The hub for tests of a pixel's presence in a sequence. t_f is True or False depending on if it is or is not a part of the sequence. If true, change the pixel color. 
''' t_f = None if mode == 'prime': t_f = self.is_prime(x) elif mode == 'triangular': t_f = self.is_triangular(x) elif mode == 'square': t_f = self.is_square(x) elif mode == 'pentagonal': t_f = self.is_pentagonal(x) elif mode == 'hexagonal': t_f = self.is_hexagonal(x) elif mode == 'heptagonal': t_f = self.is_heptagonal(x) elif mode == 'octogonal': t_f = self.is_octogonal(x) elif mode == 'nonagonal': t_f = self.is_nonagonal(x) elif mode == 'decagonal': t_f = self.is_decagonal(x) elif mode == 'hendecagonal': t_f = self.is_hendecagonal(x) elif mode == 'dodecagonal': t_f = self.is_dodecagonal(x) elif mode == 'fibonacci': t_f = self.is_fibonacci(x) elif mode == 'factorial': t_f = self.is_factorial(x) elif mode == 'mersenne_prime': t_f = self.is_mersenne_prime(x) elif mode == 'a030513': t_f = self.is_a030513(x) elif mode == 'a050704': t_f = self.is_a050704(x) elif mode == 'a037003': t_f = self.is_a037003(x) if t_f: color = self.COLORS[num_square % len(self.COLORS)] self.image_array[cursor[0]-1, cursor[1]-1][0] = color[0] self.image_array[cursor[0]-1, cursor[1]-1][1] = color[1] self.image_array[cursor[0]-1, cursor[1]-1][2] = color[2] def write_image(self, mode): ''' Write the finalized pixel color values to a PNG. ''' image = Image.fromarray(self.image_array) image = image.resize(self.image_size, Image.NEAREST) image.save(self.IMAGE_PATH) def is_prime(self, x): ''' Return True if x is prime and False otherwise. ''' if x in self.is_prime_list: return True elif x in self.is_not_prime_list: return False if not isinstance(x, int) or x < 0: raise NumberError if x==0 or x == 1: self.is_not_prime_list.append(x) return False if x == 2: self.is_prime_list.append(x) return True for y in range(2, math.floor(math.sqrt(x))+1): if x%y == 0: self.is_not_prime_list.append(x) return False self.is_prime_list.append(x) return True def is_triangular(self, x): ''' Return True if x is triangular and False otherwise. ''' for y in range(1, x+1): t = (y*(y+1)) / 2 if t == x: return True elif t > x: return False def is_square(self, x): ''' Return True if x is square and False otherwise. ''' for y in range(1, x+1): s = y**2 if s == x: return True elif s > x: return False def is_pentagonal(self, x): ''' Return True if x is pentagonal and False otherwise. ''' for y in range(1, x+1): p = (y*(3*y-1)) / 2 if p == x: return True elif p > x: return False def is_hexagonal(self, x): ''' Return True if x is hexagonal and False otherwise. ''' for y in range(1, x+1): h = y*(2*y-1) if h == x: return True elif h > x: return False def is_heptagonal(self, x): ''' Return True if x is heptagonal and False otherwise. ''' for y in range(1, x+1): h = (y*(5*y-3)) / 2 if h == x: return True elif h > x: return False def is_octogonal(self, x): ''' Return True if x is octogonal and False otherwise. ''' for y in range(1, x+1): o = y*(3*y-2) if o == x: return True elif o > x: return False def is_nonagonal(self, x): ''' Return True if x is nonagonal and False otherwise. ''' for y in range(1, x+1): n = (y*(7*y-5)) / 2 if n == x: return True elif n > x: return False def is_decagonal(self, x): ''' Return True if x is decagonal and False otherwise. ''' for y in range(1, x+1): d = 4*y**2 - 3*y if d == x: return True elif d > x: return False def is_hendecagonal(self, x): ''' Return True if x is hendecagonal and False otherwise. ''' for y in range(1, x+1): h = (9*y**2 - 7*y) / 2 if h == x: return True elif h > x: return False def is_dodecagonal(self, x): ''' Return True if x is dodecagonal and False otherwise. 
''' for y in range(1, x+1): d = 5*y**2 - 4*y if d == x: return True elif d > x: return False def is_fibonacci(self, x): ''' Return True for numbers in the Fibonacci sequence and False otherwise. ''' f1 = 0 f2 = 1 while True: f = f1 + f2 if f == x: return True elif f > x: return False else: f1 = f2 f2 = f def is_factorial(self, x): ''' Return True for factorials and False otherwise. ''' for y in range(1, x+1): f = 1 for z in reversed(range(1, y+1)): f = f*z if f == x: return True elif f > x: return False def is_mersenne_prime(self, x): ''' Return True for Mersenne primes and False otherwise. ''' for y in range(1, x+1): m = 2**y-1 if m == x and self.is_prime(x): return True elif m > x: return False def is_a030513(self, x): ''' Return True for numbers in A030513 and False otherwise. https://oeis.org/A030513 Numbers with 4 divisors. ''' divisors = [] for y in range(1, x+1): if x%y == 0: divisors.append(y) if len(divisors) > 4: return False if len(divisors) == 4: return True else: return False def is_a050704(self, x): ''' Return True for numbers in A050704 and False otherwise. https://oeis.org/A050704 Composite numbers k with the property that k minus the sum of the prime factors of k is prime. ''' if x == 1: return False primes = [] prime_factors = [] if self.is_prime(x): primes.append(x) else: primes = [p for p in self.is_prime_list if p <= x/2] d = x for prime in primes: while True: if d%prime == 0: prime_factors.append(prime) d //= prime else: break sum_of_prime_factors = 0 for prime_factor in prime_factors: sum_of_prime_factors += prime_factor k_minus_sum_of_prime_factors = x - sum_of_prime_factors if self.is_prime(k_minus_sum_of_prime_factors): return True else: return False def is_a037003(self, x): ''' Return True for numbers in A037003 and False otherwise. https://oeis.org/A037003 Positions of the digit '4' in the decimal expansion of Pi. ''' if self.DIGITS_OF_PI[x-1] == '4': return True else: return False def bake_pi(self, num_digits): ''' Set the value of DIGITS_OF_PI to the decimal expansion of pi for any arbitrary length. ''' mpmath.mp.dps = num_digits pi = mpmath.mp.pi self.DIGITS_OF_PI = str(pi)[2:] def debug_tests(self): ''' Outputs a list of numbers in the sequences for verification. ''' list_={'prime': [], 'triangular': [], 'square': [], 'pentagonal': [], 'hexagonal': [], 'heptagonal': [], 'hexagonal': [], 'octogonal': [], 'nonagonal': [], 'decagonal': [], 'hendecagonal': [], 'dodecagonal': [], 'fibonacci': [], 'factorial': [], 'mersenne_prime': [], 'a030513': [], 'a050704': [], 'a037003': []} for x in range(1, 100): if self.is_prime(x): list_['prime'].append(x) if self.is_triangular(x): list_['triangular'].append(x) if self.is_square(x): list_['square'].append(x) if self.is_pentagonal(x): list_['pentagonal'].append(x) if self.is_hexagonal(x): list_['hexagonal'].append(x) if self.is_heptagonal(x): list_['heptagonal'].append(x) if self.is_octogonal(x): list_['octogonal'].append(x) if self.is_nonagonal(x): list_['nonagonal'].append(x) if self.is_decagonal(x): list_['decagonal'].append(x) if self.is_hendecagonal(x): list_['hendecagonal'].append(x) if self.is_dodecagonal(x): list_['dodecagonal'].append(x) if self.is_fibonacci(x): list_['fibonacci'].append(x) if self.is_factorial(x): list_['factorial'].append(x) if self.is_mersenne_prime(x): list_['mersenne_prime'].append(x) if self.is_a030513(x): list_['a030513'].append(x) if self.is_a050704(x): list_['a050704'].append(x) if self.is_a037003(x): list_['a037003'].append(x) input(list_) if __name__ == '__main__': pass
py
1a448463f39fcc57eadbfe91f59bc8c77371cec9
import numpy as np


def freqtag_FFT(data: np.ndarray, fsamp: float | int) -> list[np.ndarray]:
    """
    Applies the Discrete Fourier Transform on a 2D array of EEG data.

    Args:
        data: (m sensors, n time points) array. Time series of each sensor.
        fsamp: Sampling rate in Hz.

    Returns:
        List containing 4 arrays in the following order:
            (m sensors, n/2 bins) array: Amplitude spectrum of each sensor.
            (m sensors, n/2 bins) array: Phase spectrum of each sensor.
            (n/2 bins) array: Available frequencies in the data.
            (m sensors, n bins) array: Complex Fourier spectrum of each sensor.
    """
    # TODO: Raise an error if invalid input is passed.
    num_points = data.shape[-1]
    midpoint = round(num_points / 2)

    untrimmed_freqs = np.fft.fftfreq(num_points, d=1 / fsamp)
    fftcomp = np.fft.fftn(data, axes=[-1])
    untrimmed_phase = np.angle(fftcomp)

    # Get amplitude, taking care of doubled DC or Nyquist frequencies.
    untrimmed_amp = np.abs(fftcomp)
    untrimmed_amp[:, 0] = untrimmed_amp[:, 0] / 2
    if num_points % 2 == 0:  # TODO: Check odd num_points handled correctly.
        untrimmed_amp[:, midpoint] = untrimmed_amp[:, midpoint] / 2
    untrimmed_amp = untrimmed_amp / num_points

    # Trim to remove opposite side of FFT operation.
    phase = untrimmed_phase[:, :midpoint]
    freqs = untrimmed_freqs[:midpoint]
    amp = untrimmed_amp[:, :midpoint]

    return [amp, phase, freqs, fftcomp]
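
# Quick self-check on synthetic data, assuming two sensors sampled at 500 Hz for 2 s; the sine
# frequencies and the sampling rate are arbitrary example values, not part of the original module.
if __name__ == "__main__":
    fs = 500.0
    t = np.arange(0, 2.0, 1.0 / fs)                            # 2 s of data -> 1000 samples
    data = np.vstack([np.sin(2 * np.pi * 10 * t),              # sensor 1: 10 Hz oscillation
                      np.sin(2 * np.pi * 15 * t)])             # sensor 2: 15 Hz oscillation
    amp, phase, freqs, fftcomp = freqtag_FFT(data, fs)
    print(amp.shape, phase.shape, freqs.shape, fftcomp.shape)  # (2, 500) (2, 500) (500,) (2, 1000)
    print("sensor 1 peak at", freqs[np.argmax(amp[0])], "Hz")  # ~10 Hz
    print("sensor 2 peak at", freqs[np.argmax(amp[1])], "Hz")  # ~15 Hz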
py
1a4484774933998746374b7d4f3fee765e74effd
from bddrest import response, when, status from nanohttp import json from sqlalchemy import Unicode, Integer from restfulpy.controllers import JSONPatchControllerMixin, ModelRestController from restfulpy.orm import commit, DeclarativeBase, Field, DBSession, \ FilteringMixin, PaginationMixin, OrderingMixin, ModifiedMixin from restfulpy.testing import ApplicableTestCase from restfulpy.exceptions import SQLError class SQLErrorCheckingModel( ModifiedMixin, FilteringMixin, PaginationMixin, OrderingMixin, DeclarativeBase ): __tablename__ = 'sql_error_checking_model' id = Field(Integer, primary_key=True) title = Field(Unicode(50), unique=True, nullable=False) class Root(ModelRestController): __model__ = SQLErrorCheckingModel @json @commit def post(self): m = SQLErrorCheckingModel() m.update_from_request() DBSession.add(m) return m @json @SQLErrorCheckingModel.expose def get(self, title: str=None): query = SQLErrorCheckingModel.query if title: return query.filter(SQLErrorCheckingModel.title == title)\ .one_or_none() return query class TestSqlExceptions(ApplicableTestCase): __controller_factory__ = Root def test_sql_errors(self): with self.given( 'Testing SQL exceptions', '/', 'POST', form=dict(title='test') ): assert response.json['title'] == 'test' when('Posting gain to raise a unique_violation sql error') assert status == 409 def test_invalid_sql_error(self): assert '500 Internal server error' == SQLError.map_exception(ValueError())
py
1a44859286adb515342052960c1814130c51bf21
import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import DropPath, to_2tuple, trunc_normal_ class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x class WindowAttention(nn.Module): r""" Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 """ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 # define a parameter table of relative position bias self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer("relative_position_index", relative_position_index) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self, x, mask=None): """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose(-2, -1)) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: nW = mask.shape[0] attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x def extra_repr(self) -> str: return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}' def flops(self, N): # calculate flops for 1 window with token length of N flops = 0 # qkv = self.qkv(x) flops += N * self.dim * 3 * self.dim # attn = (q @ k.transpose(-2, -1)) flops += self.num_heads * N * (self.dim // self.num_heads) * N # x = (attn @ v) flops += self.num_heads * N * N * (self.dim // self.num_heads) # x = self.proj(x) flops += N * self.dim * self.dim return flops class SwinTransformerBlock(nn.Module): r""" Swin Transformer Block. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resulotion. num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. 
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): super().__init__() self.dim = dim self.input_resolution = input_resolution self.num_heads = num_heads self.window_size = window_size self.shift_size = shift_size self.mlp_ratio = mlp_ratio if min(self.input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = 0 self.window_size = min(self.input_resolution) assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" self.norm1 = norm_layer(dim) self.attn = WindowAttention( dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if self.shift_size > 0: # calculate attention mask for SW-MSA H, W = self.input_resolution img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 h_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) w_slices = (slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None)) cnt = 0 for h in h_slices: for w in w_slices: img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask) def forward(self, x): H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" shortcut = x x = self.norm1(x) x = x.view(B, H, W, C) # cyclic shift if self.shift_size > 0: shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_x = x # partition windows x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C # reverse cyclic shift if self.shift_size > 0: x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: x = shifted_x x = x.view(B, H * W, C) # FFN x = shortcut + self.drop_path(x) x = x + self.drop_path(self.mlp(self.norm2(x))) 
return x def extra_repr(self) -> str: return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" def flops(self): flops = 0 H, W = self.input_resolution # norm1 flops += self.dim * H * W # W-MSA/SW-MSA nW = H * W / self.window_size / self.window_size flops += nW * self.attn.flops(self.window_size * self.window_size) # mlp flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio # norm2 flops += self.dim * H * W return flops class PatchMerging(nn.Module): r""" Patch Merging Layer. Args: input_resolution (tuple[int]): Resolution of input feature. dim (int): Number of input channels. norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm): super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def forward(self, x): """ x: B, H*W, C """ H, W = self.input_resolution B, L, C = x.shape assert L == H * W, "input feature has wrong size" assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even." x = x.view(B, H, W, C) x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C x = self.norm(x) x = self.reduction(x) return x def extra_repr(self) -> str: return f"input_resolution={self.input_resolution}, dim={self.dim}" def flops(self): H, W = self.input_resolution flops = H * W * self.dim flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim return flops class BasicLayer(nn.Module): """ A basic Swin Transformer layer for one stage. Args: dim (int): Number of input channels. input_resolution (tuple[int]): Input resolution. depth (int): Number of blocks. num_heads (int): Number of attention heads. window_size (int): Local window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. drop (float, optional): Dropout rate. Default: 0.0 attn_drop (float, optional): Attention dropout rate. Default: 0.0 drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. 
""" def __init__(self, dim, input_resolution, depth, num_heads, window_size, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False): super().__init__() self.dim = dim self.input_resolution = input_resolution self.depth = depth self.use_checkpoint = use_checkpoint # build blocks self.blocks = nn.ModuleList([ SwinTransformerBlock(dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer) for i in range(depth)]) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer) else: self.downsample = None def forward(self, x): for blk in self.blocks: if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if self.downsample is not None: x = self.downsample(x) return x def extra_repr(self) -> str: return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}" def flops(self): flops = 0 for blk in self.blocks: flops += blk.flops() if self.downsample is not None: flops += self.downsample.flops() return flops class PatchEmbed(nn.Module): r""" Image to Patch Embedding Args: img_size (int): Image size. Default: 224. patch_size (int): Patch token size. Default: 4. in_chans (int): Number of input image channels. Default: 3. embed_dim (int): Number of linear projection output channels. Default: 96. norm_layer (nn.Module, optional): Normalization layer. Default: None """ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]] self.img_size = img_size self.patch_size = patch_size self.patches_resolution = patches_resolution self.num_patches = patches_resolution[0] * patches_resolution[1] self.in_chans = in_chans self.embed_dim = embed_dim self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) if norm_layer is not None: self.norm = norm_layer(embed_dim) else: self.norm = None def forward(self, x): B, C, H, W = x.shape # FIXME look at relaxing size constraints assert H == self.img_size[0] and W == self.img_size[1], \ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C if self.norm is not None: x = self.norm(x) return x def flops(self): Ho, Wo = self.patches_resolution flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) if self.norm is not None: flops += Ho * Wo * self.embed_dim return flops class SwinTransformer(nn.Module): r""" Swin Transformer A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Args: img_size (int | tuple(int)): Input image size. Default 224 patch_size (int | tuple(int)): Patch size. Default: 4 in_chans (int): Number of input image channels. Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 embed_dim (int): Patch embedding dimension. Default: 96 depths (tuple(int)): Depth of each Swin Transformer layer. 
num_heads (tuple(int)): Number of attention heads in different layers. window_size (int): Window size. Default: 7 mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None drop_rate (float): Dropout rate. Default: 0 attn_drop_rate (float): Attention dropout rate. Default: 0 drop_path_rate (float): Stochastic depth rate. Default: 0.1 norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. ape (bool): If True, add absolute position embedding to the patch embedding. Default: False patch_norm (bool): If True, add normalization after patch embedding. Default: True use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False """ def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, **kwargs): super().__init__() self.num_classes = num_classes self.num_layers = len(depths) self.embed_dim = embed_dim self.ape = ape self.patch_norm = patch_norm self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.mlp_ratio = mlp_ratio # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if self.patch_norm else None) num_patches = self.patch_embed.num_patches patches_resolution = self.patch_embed.patches_resolution self.patches_resolution = patches_resolution # absolute position embedding if self.ape: self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) trunc_normal_(self.absolute_pos_embed, std=.02) self.pos_drop = nn.Dropout(p=drop_rate) # stochastic depth dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule # build layers self.layers = nn.ModuleList() for i_layer in range(self.num_layers): layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer), input_resolution=(patches_resolution[0] // (2 ** i_layer), patches_resolution[1] // (2 ** i_layer)), depth=depths[i_layer], num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, use_checkpoint=use_checkpoint) self.layers.append(layer) self.norm = norm_layer(self.num_features) self.avgpool = nn.AdaptiveAvgPool1d(1) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'absolute_pos_embed'} @torch.jit.ignore def no_weight_decay_keywords(self): return {'relative_position_bias_table'} def forward_features(self, x): x = self.patch_embed(x) if self.ape: x = x + self.absolute_pos_embed x = self.pos_drop(x) for layer in self.layers: x = layer(x) x = 
self.norm(x) # B L C x = self.avgpool(x.transpose(1, 2)) # B C 1 x = torch.flatten(x, 1) return x def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def flops(self): flops = 0 flops += self.patch_embed.flops() for i, layer in enumerate(self.layers): flops += layer.flops() flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers) flops += self.num_features * self.num_classes return flops def swin_transformer(**kwargs): net = SwinTransformer(**kwargs) return net if __name__ == "__main__": net = swin_transformer(img_size=224, patch_size=4, in_chans=3, num_classes=5) from torchsummary import summary import os os.environ['CUDA_VISIBLE_DEVICES'] = '2' net = net.cuda() summary(net,input_size=(3,224,224),batch_size=1,device='cuda')
py
1a4485de215c1e60f0a2ee5e532ae8d4420b09d3
# integration between serial and http # contact array as events (best option) # [ # { # "wsnNodeId" : <BeaconID> "string" # "eventType" : 901, # "timestamp" : <timestamp>, # "payload" : { # "EndNodeID" : <nodeID> "string" # "lastRSSI" : <int> # "maxRSSI" : <int> # "pktCounter" : <int> # } # ] import serial import logging import datetime import time import struct import requests import json import sys # from collections import namedtuple # from array import array ### global data and defines ### timeStart = int(time.time()) MAX_CONTACTS_LIST = 1000 #MAX number of contacts locally buffered # Serial # start character = 42 (0x2A) ('*') # start sequence = 4 times start char = 42, 42, 42, 42 START_CHAR = 0x2A START_BUF = 0x2A2A2A #this is not really a buffer BAUD_RATE = 1000000 # SERIAL_PORT = "/dev/ttyACM0" SERIAL_PORT = "/dev/ttyUSB0" #serial name, something like "/dev/ttyUSB0" on linux, something like "COM0" on windows # back-end EVENT_BECON_CONTACT = 901 #defined on the server, do not change! urlDev_CLIMB = 'https://climbdev.smartcommunitylab.it/v2/api/event/TEST/adca3db3-68d1-4197-b834-a45d61cf1c21/vlab' #TODO: REMOVE THIS FROM THE PUBLIC REPOSITORY urlDev = 'https://climbdev.smartcommunitylab.it/v2/api/event/TEST/4220a8bb-3cf5-4076-b7bd-9e7a1ff7a588/vlab' #TODO: REMOVE THIS FROM THE PUBLIC REPOSITORY urlProd = ' https://climb.smartcommunitylab.it/v2/api/event/TEST/17ee8383-4cb0-4f58-9759-1d76a77f9eff/vlab' #TODO: REMOVE THIS FROM THE PUBLIC REPOSITORY headers = {'Authorization': 'Bearer 831a2cc0-48bd-46ab-ace1-c24f767af8af'} #TODO: REMOVE THIS FROM THE PUBLIC REPOSITORY MIN_POST_PERIOD_S = 60 #time between buffer send. (every MIN_POST_PERIOD_S the buffer is sent to the server) # contactArray = [ # { # "wsnNodeId" : "Beaconid_01", #<string> # "eventType" : EVENT_BECON_CONTACT, #<int> # "timestamp" : timeStart, #<timestamp> # "payload" : { # "EndNodeID": "VelaLab_EndNode_05", #<string> # "lastRSSI": -30, #<int> # "maxRSSI": -20, #<int> # "pktCounter" : 15 #<int> # } # } # { # "wsnNodeId" : "Beaconid_01", # "eventType" : EVENT_BECON_CONTACT, # "timestamp" : timeStart, # "payload" : { # "EndNodeID": "VelaLab_EndNode_05", # "lastRSSI": -30, # "maxRSSI": -20, # "pktCounter" : 15 # } # } # ] # {"wsnNodeId":"Beaconid_01", "eventType":EVENT_BECON_CONTACT, "timestamp":timeStart, "payload":{"EndNodeID":"VelaLab_EndNode_05", "lastRSSI":-30, "maxRSSI":-20, "pktCounter":15}} ### Init ### # Log init # TODO: files are never closed, with long experiments it may create huge files! 
Handle the problem somehow # Application log and data log are handled in the same way, but with different endpoints (different files) LOG_LEVEL = logging.DEBUG timestr = time.strftime("%Y%m%d_%H%M%S") # Data logger nameDataLog = "dataLogger" filenameDataLog = timestr + "_data.log" # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') formatterDataLog = logging.Formatter('%(message)s') handler = logging.FileHandler(filenameDataLog) handler.setFormatter(formatterDataLog) dataLogger = logging.getLogger(nameDataLog) dataLogger.setLevel(LOG_LEVEL) dataLogger.addHandler(handler) # logging.basicConfig(filename=filenameLog,level=LOG_LEVEL,format='%(message)s') print("Started data log on file:", filenameDataLog) # Application logger nameAppLog = "appLogger" filenameAppLog = timestr + "_app.log" # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') formatterAppLog = logging.Formatter("%(asctime)s %(message)s", "%Y-%m-%d %H:%M:%S") handler = logging.FileHandler(filenameAppLog) handler.setFormatter(formatterAppLog) appLogger = logging.getLogger(nameAppLog) appLogger.setLevel(LOG_LEVEL) appLogger.addHandler(handler) # logging.basicConfig(filename=filenameLog,level=LOG_LEVEL,format='%(message)s') print("Started application log on file:", filenameAppLog) # print("type appLogger:", type(appLogger), "appLogger:", appLogger) # disable log for post requests - generated by urllib3 urllib3_log = logging.getLogger("urllib3") urllib3_log.setLevel(logging.CRITICAL) # Serial init ser = serial.Serial(SERIAL_PORT, BAUD_RATE) if ser.is_open: print("Serial Port already open!", ser.port, "open before initialization... closing first") appLogger.debug("Serial Port already open! %s open before initialization... closing first", ser.port) ser.close() time.sleep(10) # end if ser.is_open # init lists tmpContactList = [] contactList = [] timePostLast = time.time() ### run loop ### try: while(1): if ser.is_open: try: bytesWaiting = ser.in_waiting except Exception as e: print("Serial Port input exception:", e) appLogger.debug("Serial Port input exception: %s", e) bytesWaiting = 0 ser.close() time.sleep(10) continue if bytesWaiting > 0: # to print raw data in hex decomment here and comment the rest of the while(1) # print("\nSerial Waiting:", bytesWaiting) # bufferSerial = ser.read(bytesWaiting) # bufferSerialNum = list(bufferSerial) # for i in range(0,bytesWaiting): # print("", format(bufferSerialNum[i], "02X"), end='', flush=True) # startChar = 1; # to start decoding packets decmment here startChar = int.from_bytes(ser.read(1), byteorder='little', signed=False) if startChar == START_CHAR: startBuf = int.from_bytes(ser.read(3), byteorder='little', signed=False) if startBuf == START_BUF: # Received START: decode packet header nodeID = int.from_bytes(ser.read(1), byteorder='little', signed=False) counter = int.from_bytes(ser.read(1), byteorder='little', signed=False) pktLast = (counter & 128) pktCount = counter & 127 # print("Counter:", pktCount, "type:", type(pktCount)) tmpBuf = ser.read(2) dataLen = int.from_bytes(tmpBuf, byteorder='little', signed=False) # print("Data Length:", dataLen, "type:", type(dataLen)) if (dataLen-1) % 9 != 0: #TODO: 9 is the single node report size. Parametrize it! 
# print("\n#### Corrupted packetLength #### NodeID:", nodeID, "\tcounter", pktCount, "\tdataLen", dataLen-1, "\tendChar", endChar) appLogger.debug("PKT CorruptedLen NodeID %s counter %d dataLen %d", nodeID, pktCount, dataLen-1) continue #end if # read packet payload dataBuf = ser.read(dataLen-1) # NB: ser.read is blocking! TODO: maybe use the non blocking version with a timeout of 100ms endChar = ser.read(1) if endChar != b'\n': #TODO: parametrize endchar '\n' numBuf = list(dataBuf) payloadStr = "" for i in range(0,dataLen-1): payloadStr = payloadStr + ' {:02X}'.format(numBuf[i]) endBuf = list(endChar) # payloadStr = payloadStr + " {:02X}".format(endBuf[0]) + " {:02X}".format(endBuf[1]) payloadStr = payloadStr + ' {:02X}'.format(endBuf[0]) # print("\n#### Corrupted endChar #### NodeID:", nodeID, "\tcounter", pktCount, "\tdataLen", dataLen-1, "\tendChar", endChar) appLogger.debug("PKT CorruptedEnd NodeID %s counter %d dataLen %d payloadHex:%s", nodeID, pktCount, dataLen-1, payloadStr) continue #end if endChar != b'\n\x00' # timestamp for received packet timenow = time.time() timestamp = int(round(timenow * 1000)) timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timenow)) hourstr = time.strftime("[%H:%M:%S]", time.localtime(timenow)) # decode payload: Contact data (9Byte) = [node_id(6Byte)][last_rssi(1Byte)][max_rssi(1Byte)][rx_pkt_count(1Byte)] # node_id saved as big_endiann tmpContactList = [] corrupted = False i = 0; while i <= dataLen-10: #TODO: 10 is the single node report size - 1. Parametrize it tmpContact = struct.unpack_from("6sbbB", dataBuf, i) # print("type tmpContact:", type(tmpContact), "tmpContact:", tmpContact) # print("type tmpContact[0]:", type(tmpContact[0]), "tmpContact[0]:", tmpContact[0]) beaconIDstr = "" tmplist = list(tmpContact[0]) # next three lines are used to check if the first 5 bytes are zero, not to be used in the general case # if (tmplist[0] != 0) or (tmplist[1] != 0) or (tmplist[2] != 0) or (tmplist[3] != 0) or (tmplist[4] != 0): # corrupted = True # break for strid in tmplist: beaconIDstr = beaconIDstr + "{:02X}".format(strid) # print("type nodeIDstr:", type(nodeIDstr), "nodeIDstr:", nodeIDstr) tmpContactList.append({"wsnNodeId":beaconIDstr, "eventType":EVENT_BECON_CONTACT, "timestamp":timestamp, "payload":{"EndNodeID":str(nodeID), "lastRSSI":tmpContact[1], "maxRSSI":tmpContact[2], "pktCounter":tmpContact[3]}}) i = i + 9 # TODO: ingle node report size. 
Parametrize it # end while if corrupted: numBuf = list(dataBuf) payloadStr = "" for i in range(0,dataLen-1): payloadStr = payloadStr + ' {:02X}'.format(numBuf[i]) endBuf = list(endChar) # payloadStr = payloadStr + ' {:02X}'.format(endBuf[0]) + ' {:02X}'.format(endBuf[1]) payloadStr = payloadStr + ' {:02X}'.format(endBuf[0]) # print("\n#### Corrupted payload #### NodeID:", nodeID, "\tcounter", pktCount, "\tdataLen", dataLen-1, "\tendChar", endChar) appLogger.debug("PKT CorruptedPayload NodeID %s counter %d dataLen %d payloadHex:%s", nodeID, pktCount, dataLen-1, payloadStr) tmpContactList = [] continue #end if corrupted # print pkt header print("") # print(timestr, "nodeID:", nodeID, "\tlast:", pktLast, "\tcounter:", pktCount, "\tdataLen:", dataLen-1, "\tendChar:", endChar) # demo print, remaps nodesIDs for the terminal plot # print(hourstr, "\tNodeID:", nodeID, "\tPKT counter:", pktCount, "\t# beacons:", int((dataLen-1)/9) ) #if nodeID == 4: # nodeIDprint = 1 #elif nodeID == 132: # nodeIDprint = 2 #elif nodeID == 135: # nodeIDprint = 3 #else: # nodeIDprint = 0 nodeIDprint = nodeID print(hourstr, "NodeID:", nodeIDprint) # print packet payload as hex # numBuf = list(dataBuf) # payloadStr = "hex:" # for i in range(0,dataLen-1): # print(' {:02X}'.format(numBuf[i]), end='') # payloadStr = payloadStr + ' {:02X}'.format(numBuf[i]) # # print("") # print contacts from contactList for item in tmpContactList: # print("BeaconID: ", item["wsnNodeId"], end=" ") # # print(" Timestamp: ", item["timestamp"], end="") # print("lastRSSI: ", item["payload"]["lastRSSI"], end=" ") # print("maxRSSI: ", item["payload"]["maxRSSI"], end=" ") # print("pktcounter: ", item["payload"]["pktCounter"]) # demo print # idstr = item["wsnNodeId"] # print("type idstr:", type(idstr), "idstr:", idstr) # print("BeaconID:", item["wsnNodeId"][-2:], "\tmax RSSI:", item["payload"]["maxRSSI"], "\t# contacts:", item["payload"]["pktCounter"]) print("BeaconID:", item["wsnNodeId"][-2:], "\tcontacts:", item["payload"]["pktCounter"], "\tmax RSSI:", item["payload"]["maxRSSI"]) ## end for ### log on local data file # log from contactList contactStr = "" for item in tmpContactList: tmps = " " + item["wsnNodeId"] + " " + str(item["payload"]["lastRSSI"]) + " " + str(item["payload"]["maxRSSI"]) + " " + str(item["payload"]["pktCounter"]) contactStr = contactStr + tmps # end for dataLogger.debug("%s %s NodeID %d Last %d Counter %d DataLen %d Contacts [ID-last-max-cnt]%s", timestr, timestamp, nodeID, pktLast, pktCount, dataLen-1, contactStr) ### send data to server contactList.extend(tmpContactList) #TODO: check the number of contacts in the buffer before adding the new contacts numContacts = len(contactList) if numContacts > MAX_CONTACTS_LIST: #TODO: add an error print somewhere if the buffer size is exceeded del contactList[:(numContacts-MAX_CONTACTS_LIST)] numContacts = len(contactList) # print("Current packet:", len(tmpContactList), "contacts. Buffer to send:", numContacts, "contacts") timePost = time.time() if timePost - timePostLast > MIN_POST_PERIOD_S: print("POST request: sending", numContacts, "contacts...") exc = 0 try: r = requests.post(urlDev, json=contactList, headers=headers) #blocking call, may take some time to execute except Exception as e: print("POST request exception:", e) appLogger.debug("POST request exception: %s", e) exc = 1 if exc == 0: if r.status_code == requests.codes.ok: print("POST Response: OK") appLogger.debug("POST request with %d contacts. 
Response: OK", numContacts) contactList = [] else: print("POST Response: ERROR code:", r.status_code, "error:", r.text) appLogger.debug("POST request with %d contacts. Response: ERROR! code: %d error: %s", numContacts, r.status_code, r.text) # end if r.status_code # end if exc == 0 timePostLast = time.time() # end if timePost - timePostLast > MIN_POST_PERIOD_S ### cleanup dataBuf = None # end if startBuf == START_BUF else: # received random char NOT packet start # startList = list(startChar) payloadStr = "{:02X}".format(startChar) print(payloadStr, end=" ") appLogger.debug("START charHex: %s", payloadStr) # end if startChar == 42 # end if bytesWaiting > 0 else: # !ser.is_open (serial port is not open) print('Serial Port closed! Trying to open port:', ser.port) try: ser.open() except Exception as e: print("Serial Port open exception:", e) appLogger.debug("Serial Port exception: %s", e) time.sleep(10) continue print("Serial Port open!") appLogger.debug("Serial Port open") ser.reset_input_buffer() contactList = [] # end if ser.is_open # end while(1) except KeyboardInterrupt as key: # print("Keyboard Interrupt! Exit") print("Shutdown requested... exiting") appLogger.debug("Keyboard Interrupt, Exit") except Exception as e: print("Running exception:", e) appLogger.debug("Running exception: %s", e) # close and clean before exit if ser.is_open: print("Closing Serial Port") ser.close() # end if ser.is_open print("Closing Data Log Handler") log = logging.getLogger(nameDataLog) #nameDataLog # print("Log:", log) logHandlers = log.handlers[:] # print("Log Handlers:", logHandlers) for handler in logHandlers: # print("Closing Log Handler:", handler) handler.close() # print("Closing Log:", logHandlers) log.removeHandler(logHandlers) print("Closing App Log Handler") log = logging.getLogger(nameAppLog) #nameDataLog # print("Log:", log) logHandlers = log.handlers[:] # print("Log Handlers:", logHandlers) for handler in logHandlers: # print("Closing Log Handler:", handler) handler.close() # print("Closing Log:", logHandlers) log.removeHandler(logHandlers) sys.exit(0)
py
1a448621d4b418cb598fdeecb0d893637a5db8a9
# Plugin for gallery_get.
# Each definition can be one of the following:
#  - a string
#  - a regex string
#  - a function that takes source as a parameter and returns an array or a string.
#    (You may assume that re and urllib are already imported.)
# If you comment out a parameter, it will use the default defined in __init__.py

# identifier (default = name of this plugin after "plugin_"): If there's a match, we'll attempt to
# download images using this plugin.

# title: parses the gallery page for a title. This will be the folder name of the output gallery.

# redirect: if the links in the gallery page go to an html instead of an image, use this to parse
# the gallery page.

# direct_links: if redirect is non-empty, this parses each redirect page for a single image.
# Otherwise, this parses the gallery page for all images.
direct_links = r'meta property="og:image" content="(.+?)"'

# same_filename (default=False): if True, uses filename specified on remote link. Otherwise,
# creates own filename with incremental index.
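
# For illustration only: the other definitions described above could be supplied either as regex
# strings or as functions of the page source (re/urllib are available per the note above). The
# patterns and the function body here are hypothetical, not part of this plugin.
#
# title = r'<title>(.+?)</title>'                    # regex-string form
#
# def redirect(source):                              # function form
#     return re.findall(r'href="(http[^"]+\.html)"', source)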
py
1a448766403f2f2c6bdf8ecc04ef7b214cf55af3
# coding=utf8
import numpy as np


def rerec(bbox):
    '''
    Convert to square
    :param bbox:
    :return:
    '''
    h = bbox[:, 2] - bbox[:, 0] + 1
    w = bbox[:, 3] - bbox[:, 1] + 1
    max_l = np.maximum(h, w)
    bbox[:, 0] = np.round(bbox[:, 0] + (h - max_l) * 0.5)
    bbox[:, 1] = np.round(bbox[:, 1] + (w - max_l) * 0.5)
    bbox[:, 2] = bbox[:, 0] + max_l - 1
    bbox[:, 3] = bbox[:, 1] + max_l - 1
    return bbox
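
# --- Usage sketch, not part of the original module ---
# rerec expands each box in place so that both sides equal the longer side,
# keeping the box centred on the same region. Coordinate order here follows how
# h and w are computed above (rows 0/2 vs. 1/3). The sample box is made up.
if __name__ == '__main__':
    boxes = np.array([[0.0, 0.0, 9.0, 19.0]])  # a 10x20 box
    print(rerec(boxes.copy()))  # both sides become the longer side (20 pixels)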
py
1a4487aa2926bb5ffd9332a8a30a9ae081d6df31
# hash(object)
print(hash(1))
print(hash('a'))
py
1a448801aa3a4200d01cf0a2c7bf939cafdea682
#!/usr/bin/env python
from setuptools import setup, find_packages

setup(
    name='hydra-json',
    version='0.1',
    description='App to import and export hydra networks in JSON format',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[],
    entry_points='''
    [console_scripts]
    hydra-json=hydra_json.cli:start_cli
    ''',
)
py
1a448803503cf7d163587edd632e8b19ae19a797
import os from binascii import unhexlify import pytest from cose.algorithms import EdDSA from cose.keys.curves import Ed448, Ed25519, X448, X25519 from cose.exceptions import CoseInvalidKey, CoseIllegalKeyType, CoseUnsupportedCurve, CoseException, CoseIllegalKeyOps from cose.keys import OKPKey, CoseKey from cose.keys.keyops import SignOp, MacVerifyOp from cose.keys.keyparam import KpKty, OKPKpCurve, OKPKpX, OKPKpD, KpAlg, KpKeyOps ############################################################### # OKP key checks ############################################################### from cose.keys.keytype import KtyOKP, KtyEC2, KtySymmetric def _is_valid_okp_key(key: OKPKey): check1 = (KpKty in key and OKPKpCurve in key) and (OKPKpX in key or OKPKpD in key) check2 = key[OKPKpCurve] in [X25519, X448, Ed25519, Ed448] return check2 and check1 @pytest.mark.parametrize('kty_attr, kty_value', [(KpKty, KtyOKP), ('KTY', 'OKP'), (1, 1), (KpKty, 'OKP'), (KpKty, 1), ('KTY', KtyOKP), ('KTY', 1), (1, KtyOKP), (1, 'OKP')]) @pytest.mark.parametrize('crv_attr, crv_value', [(OKPKpCurve, X25519), ('CURVE', X25519), (-1, X25519)]) @pytest.mark.parametrize('x_attr, x_value', [(OKPKpX, os.urandom(32)), ('X', os.urandom(32)), (-2, os.urandom(32))]) @pytest.mark.parametrize('d_attr, d_value', [(OKPKpD, os.urandom(32)), ('D', os.urandom(32)), (-4, os.urandom(32))]) def test_okp_keys_from_dicts(kty_attr, kty_value, crv_attr, crv_value, x_attr, x_value, d_attr, d_value): # The public and private values used in this test do not form a valid elliptic curve key, # but we don't care about that here d = {kty_attr: kty_value, crv_attr: crv_value, x_attr: x_value, d_attr: d_value} cose_key = CoseKey.from_dict(d) assert _is_valid_okp_key(cose_key) @pytest.mark.parametrize('kty_attr, kty_value', [(KpKty, KtyOKP), ('KTY', 'OKP'), (1, 1)]) @pytest.mark.parametrize('crv_attr, crv_value', [(OKPKpCurve, Ed25519)]) @pytest.mark.parametrize('d_attr, d_value', [(OKPKpD, os.urandom(32)), ('D', os.urandom(32)), (-4, os.urandom(32))]) def test_okp_private_key_from_dicts(kty_attr, kty_value, crv_attr, crv_value, d_attr, d_value): # The public and private values used in this test do not form a valid elliptic curve key, # but we don't care about that here d = {kty_attr: kty_value, crv_attr: crv_value, d_attr: d_value} cose_key = CoseKey.from_dict(d) assert _is_valid_okp_key(cose_key) @pytest.mark.parametrize('kty_attr, kty_value', [(KpKty, KtyOKP), ('KTY', 'OKP'), (1, 1)]) @pytest.mark.parametrize('crv_attr, crv_value', [(OKPKpCurve, Ed448), ('CURVE', Ed448), (-1, Ed448)]) @pytest.mark.parametrize('x_attr, x_value', [(OKPKpX, os.urandom(32)), ('X', os.urandom(32)), (-2, os.urandom(32))]) def test_okp_public_keys_from_dicts(kty_attr, kty_value, crv_attr, crv_value, x_attr, x_value): # The public and private values used in this test do not form a valid elliptic curve key, # but we don't care about that here d = {kty_attr: kty_value, crv_attr: crv_value, x_attr: x_value} cose_key = CoseKey.from_dict(d) assert _is_valid_okp_key(cose_key) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448, 4, 'X25519', 'X448']) def test_okp_key_generation_encoding_decoding(crv): trails = 256 for i in range(trails): okp_test = OKPKey.generate_key(crv=crv) okp_encoded = okp_test.encode() okp_decoded = CoseKey.decode(okp_encoded) assert _is_valid_okp_key(okp_decoded) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448, 'X25519', 4, 5]) def test_okp_key_generation(crv): key = OKPKey.generate_key(crv) assert _is_valid_okp_key(key) 
@pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448]) def test_okp_key_construction(crv): key = OKPKey(crv=crv, x=os.urandom(32), d=os.urandom(32), optional_params={'ALG': 'EDDSA'}) assert _is_valid_okp_key(key) serialized = key.encode() _ = CoseKey.decode(serialized) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448]) def test_fail_on_missing_key_values(crv): with pytest.raises(CoseInvalidKey) as excinfo: _ = OKPKey(crv=crv) assert "Either the public values or the private value must be specified" in str(excinfo.value) def test_fail_on_missing_crv_attr(): cose_key = {KpKty: KtyOKP, OKPKpX: os.urandom(32), OKPKpD: os.urandom(32)} with pytest.raises(CoseInvalidKey) as excinfo: _ = CoseKey.from_dict(cose_key) assert "COSE curve cannot be None" in str(excinfo.value) @pytest.mark.parametrize('crv', [X25519, X448, Ed25519, Ed448]) @pytest.mark.parametrize('kty', [KtyEC2, KtySymmetric, 2, 4]) def test_fail_on_illegal_kty(crv, kty): params = {KpKty: kty} with pytest.raises(CoseIllegalKeyType) as excinfo: _ = OKPKey(crv=crv, x=os.urandom(32), d=os.urandom(32), optional_params=params) assert "Illegal key type in OKP COSE Key" in str(excinfo.value) def test_remove_empty_keyops_list(): cose_key = {KpKty: KtyOKP, OKPKpD: os.urandom(16), KpAlg: EdDSA, OKPKpCurve: Ed25519, KpKeyOps: []} key = CoseKey.from_dict(cose_key) assert KpKeyOps not in key def test_existing_non_empty_keyops_list(): cose_key = {KpKty: KtyOKP, OKPKpD: os.urandom(16), KpAlg: EdDSA, OKPKpCurve: Ed448, KpKeyOps: [SignOp]} key = CoseKey.from_dict(cose_key) assert KpKeyOps in key def test_key_ops_setter_getter(): key = OKPKey.generate_key('ED25519') key.key_ops = [SignOp] assert SignOp in key.key_ops with pytest.raises(CoseIllegalKeyOps) as excinfo: key.key_ops = [MacVerifyOp] assert "Invalid COSE key operation" in str(excinfo) def test_dict_operations_on_okp_key(): cose_key = {KpKty: KtyOKP, OKPKpD: os.urandom(16), KpAlg: EdDSA, OKPKpCurve: Ed448, KpKeyOps: [SignOp]} key = CoseKey.from_dict(cose_key) assert KpKty in key assert OKPKpD in key assert OKPKpX not in key assert 1 in key assert -4 in key assert KpAlg in key assert 'ALG' in key def test_unknown_key_attributes(): key = 'a401012004215820a3ff263595beb377d1a0ce1d04dad2d40966ac6bcb622051b84659184d5d9a326c7375626a656374206e616d6560' key = CoseKey.decode(unhexlify(key)) assert "subject name" in key def test_key_set_curve(): key = 'a401012006215820898ff79a02067a16ea1eccb90fa52246f5aa4dd6ec076bba0259d904b7ec8b0c2358208f781a095372f85b6d' \ '9f6109ae422611734d7dbfa0069a2df2935bb2e053bf35' key = CoseKey.decode(unhexlify(key)) assert key.crv == Ed25519 key.crv = X25519 assert key.crv == X25519 with pytest.raises(CoseUnsupportedCurve) as excinfo: key.crv = 3 # P-521 assert "Invalid COSE curve" in str(excinfo.value) key.crv = X448.identifier assert key.crv == X448 def test_key_generation_with_optional_parameters(): key = OKPKey.generate_key(crv='ED25519', optional_params={'KpKid': 4})
py
1a44889276d7b3cac6483a61278995444717afec
from ...core import (Function, I, Integer, Rational, cacheit, nan, oo, pi, sympify, zoo) from ...core.function import ArgumentIndexError, _coeff_isneg from ..combinatorial.factorials import RisingFactorial, factorial from .exponential import exp, log from .miscellaneous import sqrt def _rewrite_hyperbolics_as_exp(expr): expr = sympify(expr) return expr.xreplace({h: h.rewrite(exp) for h in expr.atoms(HyperbolicFunction)}) ############################################################################### # ######################### HYPERBOLIC FUNCTIONS ############################ # ############################################################################### class HyperbolicFunction(Function): """ Base class for hyperbolic functions. See Also ======== diofant.functions.elementary.hyperbolic.sinh diofant.functions.elementary.hyperbolic.cosh diofant.functions.elementary.hyperbolic.tanh diofant.functions.elementary.hyperbolic.coth """ unbranched = True class sinh(HyperbolicFunction): r""" The hyperbolic sine function, `\frac{e^x - e^{-x}}{2}`. * sinh(x) -> Returns the hyperbolic sine of x See Also ======== diofant.functions.elementary.hyperbolic.cosh diofant.functions.elementary.hyperbolic.tanh diofant.functions.elementary.hyperbolic.asinh """ def fdiff(self, argindex=1): """Returns the first derivative of this function.""" if argindex == 1: return cosh(self.args[0]) else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """Returns the inverse of this function.""" return asinh @classmethod def eval(cls, arg): from .trigonometric import sin arg = sympify(arg) if arg.is_Number: if arg in (oo, -oo, 0): return arg elif arg.is_negative: return -cls(-arg) else: if arg is zoo: return nan i_coeff = arg.as_coefficient(I) if i_coeff is not None: return I * sin(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.func == asinh: return arg.args[0] if arg.func == acosh: x = arg.args[0] return sqrt(x - 1) * sqrt(x + 1) if arg.func == atanh: x = arg.args[0] return x/sqrt(1 - x**2) if arg.func == acoth: x = arg.args[0] return 1/(sqrt(x - 1) * sqrt(x + 1)) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): """Returns the next term in the Taylor series expansion.""" if n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) if len(previous_terms) >= 2: p = previous_terms[-2] return p * x**2 / (n*(n - 1)) else: return x**n / factorial(n) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): """Returns this function as a complex coordinate.""" from .trigonometric import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return self.expand(deep, **hints), Integer(0) else: return self, Integer(0) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() return sinh(re)*cos(im), cosh(re)*sin(im) def _eval_expand_complex(self, deep=True, **hints): re_part, im_part = self.as_real_imag(deep=deep, **hints) return re_part + im_part*I def _eval_expand_trig(self, **hints): arg = self.args[0] x = None if arg.is_Add: # TODO, implement more if deep stuff here x, y = arg.as_two_terms() else: coeff, terms = arg.as_coeff_Mul(rational=True) if coeff != 1 and coeff.is_Integer and terms != 1: x = terms y = (coeff - 1)*x if x is not None: return (sinh(x)*cosh(y) + sinh(y)*cosh(x)).expand(trig=True) return sinh(arg) def _eval_rewrite_as_tractable(self, arg): return (exp(arg) - exp(-arg)) / 2 def _eval_rewrite_as_exp(self, arg): return (exp(arg) - 
exp(-arg)) / 2 def _eval_rewrite_as_cosh(self, arg): return -I*cosh(arg + pi*I/2) def _eval_rewrite_as_tanh(self, arg): tanh_half = tanh(arg/2) return 2*tanh_half/(1 - tanh_half**2) def _eval_rewrite_as_coth(self, arg): coth_half = coth(arg/2) return 2*coth_half/(coth_half**2 - 1) def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True def _eval_is_finite(self): if self.args[0].is_imaginary: return True class cosh(HyperbolicFunction): r""" The hyperbolic cosine function, `\frac{e^x + e^{-x}}{2}`. * cosh(x) -> Returns the hyperbolic cosine of x See Also ======== diofant.functions.elementary.hyperbolic.sinh diofant.functions.elementary.hyperbolic.tanh diofant.functions.elementary.hyperbolic.acosh """ def fdiff(self, argindex=1): if argindex == 1: return sinh(self.args[0]) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from .trigonometric import cos arg = sympify(arg) if arg.is_Number: if arg in (oo, -oo): return oo elif arg == 0: return Integer(1) elif arg.is_negative: return cls(-arg) else: if arg is zoo: return nan i_coeff = arg.as_coefficient(I) if i_coeff is not None: return cos(i_coeff) else: if _coeff_isneg(arg): return cls(-arg) if arg.func == asinh: return sqrt(1 + arg.args[0]**2) if arg.func == acosh: return arg.args[0] if arg.func == atanh: return 1/sqrt(1 - arg.args[0]**2) if arg.func == acoth: x = arg.args[0] return x/(sqrt(x - 1) * sqrt(x + 1)) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 1: return Integer(0) else: x = sympify(x) if len(previous_terms) >= 2: p = previous_terms[-2] return p * x**2 / (n*(n - 1)) else: return x**n/factorial(n) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from .trigonometric import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return self.expand(deep, **hints), Integer(0) else: return self, Integer(0) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() return cosh(re)*cos(im), sinh(re)*sin(im) def _eval_expand_complex(self, deep=True, **hints): re_part, im_part = self.as_real_imag(deep=deep, **hints) return re_part + im_part*I def _eval_expand_trig(self, deep=True, **hints): arg = self.args[0] x = None if arg.is_Add: # TODO, implement more if deep stuff here x, y = arg.as_two_terms() else: coeff, terms = arg.as_coeff_Mul(rational=True) if coeff != 1 and coeff.is_Integer and terms != 1: x = terms y = (coeff - 1)*x if x is not None: return (cosh(x)*cosh(y) + sinh(x)*sinh(y)).expand(trig=True) return cosh(arg) def _eval_rewrite_as_tractable(self, arg): return (exp(arg) + exp(-arg)) / 2 def _eval_rewrite_as_exp(self, arg): return (exp(arg) + exp(-arg)) / 2 def _eval_rewrite_as_sinh(self, arg): return -I*sinh(arg + pi*I/2) def _eval_rewrite_as_tanh(self, arg): tanh_half = tanh(arg/2)**2 return (1 + tanh_half)/(1 - tanh_half) def _eval_rewrite_as_coth(self, arg): coth_half = coth(arg/2)**2 return (coth_half + 1)/(coth_half - 1) def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return Integer(1) else: return self.func(arg) def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True def 
_eval_is_finite(self): if self.args[0].is_imaginary: return True class tanh(HyperbolicFunction): r""" The hyperbolic tangent function, `\frac{\sinh(x)}{\cosh(x)}`. * tanh(x) -> Returns the hyperbolic tangent of x See Also ======== diofant.functions.elementary.hyperbolic.sinh diofant.functions.elementary.hyperbolic.cosh diofant.functions.elementary.hyperbolic.atanh """ def fdiff(self, argindex=1): if argindex == 1: return 1 - tanh(self.args[0])**2 else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """Returns the inverse of this function.""" return atanh @classmethod def eval(cls, arg): from .trigonometric import tan arg = sympify(arg) if arg.is_Number: if arg is oo: return Integer(1) elif arg == -oo: return Integer(-1) elif arg == 0: return Integer(0) elif arg.is_negative: return -cls(-arg) else: if arg is zoo: return nan i_coeff = arg.as_coefficient(I) if i_coeff is not None: if _coeff_isneg(i_coeff): return -I * tan(-i_coeff) return I * tan(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.func == asinh: x = arg.args[0] return x/sqrt(1 + x**2) if arg.func == acosh: x = arg.args[0] return sqrt(x - 1) * sqrt(x + 1) / x if arg.func == atanh: return arg.args[0] if arg.func == acoth: return 1/arg.args[0] @staticmethod @cacheit def taylor_term(n, x, *previous_terms): from .. import bernoulli if n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) a = 2**(n + 1) B = bernoulli(n + 1) F = factorial(n + 1) return a*(a - 1) * B/F * x**n def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from .trigonometric import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return self.expand(deep, **hints), Integer(0) else: return self, Integer(0) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() denom = sinh(re)**2 + cos(im)**2 return sinh(re)*cosh(re)/denom, sin(im)*cos(im)/denom def _eval_rewrite_as_tractable(self, arg): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp - neg_exp)/(pos_exp + neg_exp) def _eval_rewrite_as_exp(self, arg): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp - neg_exp)/(pos_exp + neg_exp) def _eval_rewrite_as_sinh(self, arg): return I*sinh(arg)/sinh(pi*I/2 - arg) def _eval_rewrite_as_cosh(self, arg): return I*cosh(pi*I/2 - arg)/cosh(arg) def _eval_rewrite_as_coth(self, arg): return 1/coth(arg) def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True def _eval_is_finite(self): if self.args[0].is_extended_real: return True class coth(HyperbolicFunction): r""" The hyperbolic cotangent function, `\frac{\cosh(x)}{\sinh(x)}`. 
* coth(x) -> Returns the hyperbolic cotangent of x """ def fdiff(self, argindex=1): if argindex == 1: return -1/sinh(self.args[0])**2 else: raise ArgumentIndexError(self, argindex) def inverse(self, argindex=1): """Returns the inverse of this function.""" return acoth @classmethod def eval(cls, arg): from .trigonometric import cot arg = sympify(arg) if arg.is_Number: if arg is oo: return Integer(1) elif arg == -oo: return Integer(-1) elif arg == 0: return zoo elif arg.is_negative: return -cls(-arg) else: if arg is zoo: return nan i_coeff = arg.as_coefficient(I) if i_coeff is not None: if _coeff_isneg(i_coeff): return I * cot(-i_coeff) return -I * cot(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) if arg.func == asinh: x = arg.args[0] return sqrt(1 + x**2)/x if arg.func == acosh: x = arg.args[0] return x/(sqrt(x - 1) * sqrt(x + 1)) if arg.func == atanh: return 1/arg.args[0] if arg.func == acoth: return arg.args[0] @staticmethod @cacheit def taylor_term(n, x, *previous_terms): from .. import bernoulli if n == 0: return 1 / sympify(x) elif n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) B = bernoulli(n + 1) F = factorial(n + 1) return 2**(n + 1) * B/F * x**n def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from .trigonometric import cos, sin if self.args[0].is_extended_real: if deep: hints['complex'] = False return self.expand(deep, **hints), Integer(0) else: return self, Integer(0) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() denom = sinh(re)**2 + sin(im)**2 return sinh(re)*cosh(re)/denom, -sin(im)*cos(im)/denom def _eval_rewrite_as_tractable(self, arg): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp + neg_exp)/(pos_exp - neg_exp) def _eval_rewrite_as_exp(self, arg): neg_exp, pos_exp = exp(-arg), exp(arg) return (pos_exp + neg_exp)/(pos_exp - neg_exp) def _eval_rewrite_as_sinh(self, arg): return -I*sinh(pi*I/2 - arg)/sinh(arg) def _eval_rewrite_as_cosh(self, arg): return -I*cosh(arg)/cosh(pi*I/2 - arg) def _eval_rewrite_as_tanh(self, arg): return 1/tanh(arg) def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return 1/arg else: return self.func(arg) class ReciprocalHyperbolicFunction(HyperbolicFunction): """Base class for reciprocal functions of hyperbolic functions.""" # To be defined in class _reciprocal_of = None _is_even = None _is_odd = None @classmethod def eval(cls, arg): if arg.could_extract_minus_sign(): if cls._is_even: return cls(-arg) elif cls._is_odd: return -cls(-arg) t = cls._reciprocal_of.eval(arg) return 1/t if t is not None else t def _call_reciprocal(self, method_name, *args, **kwargs): # Calls method_name on _reciprocal_of o = self._reciprocal_of(self.args[0]) return getattr(o, method_name)(*args, **kwargs) def _rewrite_reciprocal(self, method_name, arg): # Special handling for rewrite functions. 
If reciprocal rewrite returns # unmodified expression, then return None t = self._call_reciprocal(method_name, arg) assert t is not None and t != self._reciprocal_of(arg) return 1/t def _eval_rewrite_as_exp(self, arg): return self._rewrite_reciprocal("_eval_rewrite_as_exp", arg) def _eval_rewrite_as_tractable(self, arg): return self._rewrite_reciprocal("_eval_rewrite_as_tractable", arg) def _eval_rewrite_as_tanh(self, arg): return self._rewrite_reciprocal("_eval_rewrite_as_tanh", arg) def _eval_rewrite_as_coth(self, arg): return self._rewrite_reciprocal("_eval_rewrite_as_coth", arg) def as_real_imag(self, deep=True, **hints): return (1 / self._reciprocal_of(self.args[0])).as_real_imag(deep, **hints) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def _eval_expand_complex(self, deep=True, **hints): re_part, im_part = self.as_real_imag(deep=True, **hints) return re_part + I*im_part def _eval_as_leading_term(self, x): return (1/self._reciprocal_of(self.args[0]))._eval_as_leading_term(x) def _eval_is_extended_real(self): return self._reciprocal_of(self.args[0]).is_extended_real def _eval_is_finite(self): return (1/self._reciprocal_of(self.args[0])).is_finite class csch(ReciprocalHyperbolicFunction): r""" The hyperbolic cosecant function, `\frac{2}{e^x - e^{-x}}` * csch(x) -> Returns the hyperbolic cosecant of x See Also ======== diofant.functions.elementary.hyperbolic.sinh diofant.functions.elementary.hyperbolic.cosh diofant.functions.elementary.hyperbolic.tanh diofant.functions.elementary.hyperbolic.sech diofant.functions.elementary.hyperbolic.asinh diofant.functions.elementary.hyperbolic.acosh """ _reciprocal_of = sinh _is_odd = True def fdiff(self, argindex=1): """Returns the first derivative of this function.""" if argindex == 1: return -coth(self.args[0]) * csch(self.args[0]) else: raise ArgumentIndexError(self, argindex) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): """Returns the next term in the Taylor series expansion.""" from .. import bernoulli if n == 0: return 1/sympify(x) elif n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) B = bernoulli(n + 1) F = factorial(n + 1) return 2 * (1 - 2**n) * B/F * x**n def _eval_rewrite_as_cosh(self, arg): return I / cosh(arg + I * pi / 2) class sech(ReciprocalHyperbolicFunction): r""" The hyperbolic secant function, `\frac{2}{e^x + e^{-x}}` * sech(x) -> Returns the hyperbolic secant of x See Also ======== diofant.functions.elementary.hyperbolic.sinh diofant.functions.elementary.hyperbolic.cosh diofant.functions.elementary.hyperbolic.tanh diofant.functions.elementary.hyperbolic.coth diofant.functions.elementary.hyperbolic.csch diofant.functions.elementary.hyperbolic.asinh diofant.functions.elementary.hyperbolic.acosh """ _reciprocal_of = cosh _is_even = True def fdiff(self, argindex=1): if argindex == 1: return - tanh(self.args[0])*sech(self.args[0]) else: raise ArgumentIndexError(self, argindex) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): from ..combinatorial.numbers import euler if n < 0 or n % 2 == 1: return Integer(0) else: x = sympify(x) return euler(n) / factorial(n) * x**n def _eval_rewrite_as_sinh(self, arg): return I / sinh(arg + I * pi / 2) ############################################################################### # ########################### HYPERBOLIC INVERSES ########################### # ############################################################################### class asinh(Function): """ The inverse hyperbolic sine function. 
* asinh(x) -> Returns the inverse hyperbolic sine of x See Also ======== diofant.functions.elementary.hyperbolic.cosh diofant.functions.elementary.hyperbolic.tanh diofant.functions.elementary.hyperbolic.sinh """ def fdiff(self, argindex=1): if argindex == 1: return 1/sqrt(self.args[0]**2 + 1) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from .trigonometric import asin arg = sympify(arg) if arg.is_Number: if arg in (oo, -oo, 0): return arg elif arg == 1: return log(sqrt(2) + 1) elif arg == -1: return log(sqrt(2) - 1) elif arg.is_negative: return -cls(-arg) else: if arg is zoo: return zoo i_coeff = arg.as_coefficient(I) if i_coeff is not None: return I * asin(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) if len(previous_terms) >= 2 and n > 2: p = previous_terms[-2] return -p * (n - 2)**2/(n*(n - 1)) * x**2 else: k = (n - 1) // 2 R = RisingFactorial(Rational(1, 2), k) F = factorial(k) return (-1)**k * R / F * x**n / n def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def _eval_rewrite_as_log(self, x): return log(x + sqrt(x**2 + 1)) def inverse(self, argindex=1): """Returns the inverse of this function.""" return sinh class acosh(Function): """ The inverse hyperbolic cosine function. * acosh(x) -> Returns the inverse hyperbolic cosine of x See Also ======== diofant.functions.elementary.hyperbolic.asinh diofant.functions.elementary.hyperbolic.atanh diofant.functions.elementary.hyperbolic.cosh """ def fdiff(self, argindex=1): if argindex == 1: return 1/sqrt(self.args[0]**2 - 1) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): arg = sympify(arg) if arg.is_Number: if arg in (oo, -oo): return oo elif arg == 0: return pi*I / 2 elif arg == 1: return Integer(0) elif arg == -1: return pi*I if arg.is_number: cst_table = { I: log(I*(1 + sqrt(2))), -I: log(-I*(1 + sqrt(2))), Rational(+1, 2): pi/3, Rational(-1, 2): 2*pi/3, sqrt(2)/2: pi/4, -sqrt(2)/2: 3*pi/4, 1/sqrt(2): pi/4, -1/sqrt(2): 3*pi/4, sqrt(3)/2: pi/6, -sqrt(3)/2: 5*pi/6, (sqrt(3) - 1)/sqrt(2**3): 5*pi/12, -(sqrt(3) - 1)/sqrt(2**3): 7*pi/12, sqrt(2 + sqrt(2))/2: pi/8, -sqrt(2 + sqrt(2))/2: 7*pi/8, sqrt(2 - sqrt(2))/2: 3*pi/8, -sqrt(2 - sqrt(2))/2: 5*pi/8, (1 + sqrt(3))/(2*sqrt(2)): pi/12, -(1 + sqrt(3))/(2*sqrt(2)): 11*pi/12, (sqrt(5) + 1)/4: pi/5, -(sqrt(5) + 1)/4: 4*pi/5 } if arg in cst_table: if arg.is_extended_real: return cst_table[arg]*I return cst_table[arg] if arg.is_infinite: return oo @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n == 0: return pi*I / 2 elif n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) if len(previous_terms) >= 2 and n > 2: p = previous_terms[-2] return p * (n - 2)**2/(n*(n - 1)) * x**2 else: k = (n - 1) // 2 R = RisingFactorial(Rational(1, 2), k) F = factorial(k) return -R / F * I * x**n / n def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return I*pi/2 else: return self.func(arg) def inverse(self, argindex=1): """Returns the inverse of this function.""" return cosh class atanh(Function): """ The inverse hyperbolic tangent function. 
* atanh(x) -> Returns the inverse hyperbolic tangent of x See Also ======== diofant.functions.elementary.hyperbolic.asinh diofant.functions.elementary.hyperbolic.acosh diofant.functions.elementary.hyperbolic.tanh """ def fdiff(self, argindex=1): if argindex == 1: return 1/(1 - self.args[0]**2) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from .trigonometric import atan arg = sympify(arg) if arg.is_Number: if arg == 0: return Integer(0) elif arg == 1: return oo elif arg == -1: return -oo elif arg is oo: return -I * atan(arg) elif arg == -oo: return I * atan(-arg) elif arg.is_negative: return -cls(-arg) else: if arg is zoo: return nan i_coeff = arg.as_coefficient(I) if i_coeff is not None: return I * atan(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) return x**n / n def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return arg else: return self.func(arg) def inverse(self, argindex=1): """Returns the inverse of this function.""" return tanh class acoth(Function): """ The inverse hyperbolic cotangent function. * acoth(x) -> Returns the inverse hyperbolic cotangent of x """ def fdiff(self, argindex=1): if argindex == 1: return 1/(1 - self.args[0]**2) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from .trigonometric import acot arg = sympify(arg) if arg.is_Number: if arg in (oo, -oo): return Integer(0) elif arg == 0: return pi*I / 2 elif arg == 1: return oo elif arg == -1: return -oo elif arg.is_negative: return -cls(-arg) else: if arg is zoo: return 0 i_coeff = arg.as_coefficient(I) if i_coeff is not None: return -I * acot(i_coeff) else: if _coeff_isneg(arg): return -cls(-arg) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n == 0: return pi*I / 2 elif n < 0 or n % 2 == 0: return Integer(0) else: x = sympify(x) return x**n / n def _eval_as_leading_term(self, x): from ...series import Order arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and Order(1, x).contains(arg): return I*pi/2 else: return self.func(arg) def inverse(self, argindex=1): """Returns the inverse of this function.""" return coth
py
1a4488b8fc2f137cbf983a278efac394f395e35d
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from .views import *
from . import views

urlpatterns = [
    path('', views.user_login, name='login'),
    path('logout/', views.user_logout, name='logout'),
    path('signup/', views.user_signup, name='signup'),
    path('profile', profile, name='profile'),
    path('homepage', homepage, name='homepage'),
    path('profile/update/<int:pk>', UpdateUserProfile.as_view(), name='UpdateUserProfile'),
    path('business/update/<int:pk>', UpdateBusiness.as_view(), name='updatebusiness'),
    path('search/', search_results, name='search_business'),
]

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
py
1a4489bdaac176027392636a0710ad4e86a20d86
if __name__ == '__main__':
    from setuptools import setup, Extension

    _synctex_parser = Extension('pysynctex._synctex_parser',
                                sources=['wrapper/synctex_parser.i',
                                         'wrapper/synctex_package/synctex_parser.c',
                                         'wrapper/synctex_package/synctex_parser_utils.c'],
                                include_dirs=['wrapper/synctex_package'])

    setup(name='PySyncTeX',
          version='0.2.0',
          author='Jan Kumor',
          author_email='[email protected]',
          description='Python wrapper for SyncTeX parser C library.',
          long_description=open('README.rst').read(),
          url='https://github.com/elohhim/PySyncTeX',
          license="MIT",
          platforms='ANY',
          packages=['pysynctex'],
          ext_modules=[_synctex_parser],
          classifiers=[
              'Development Status :: 3 - Alpha',
              'Intended Audience :: Developers',
              'Programming Language :: Python :: 3',
              'License :: OSI Approved :: MIT License',
              'Operating System :: POSIX :: Linux',
              'Natural Language :: English',
              'Topic :: Software Development :: Libraries :: Python Modules',
              'Topic :: Text Processing :: Markup :: LaTeX',
          ])
py
1a448a6fc512c64be785b1fabc260b80cf5ccad8
import copy
import datetime

from . import storages


class Database(object):
    def __init__(self, storage=storages.MemoryStorage(), auto_commit=True):
        self.storage = storage
        if storage:
            self._tables = storage.read()
        else:
            self._tables = {}
        self.auto_commit = auto_commit

    def commit(self):
        self.storage.write(self._tables)

    def table(self, kind):
        if kind not in self._tables:
            self._tables[kind] = {}
        table = Table(kind, self._tables[kind], self)
        return table


class Table(object):
    def __init__(self, kind, dictionary=None, database=None):
        self.kind = kind
        if dictionary is None:
            self.dictionary = {}
        else:
            # bind dictionary to the argument one
            self.dictionary = dictionary
        self.database = database

    def _auto_commit(self):
        if self.database and self.database.auto_commit:
            self.database.commit()

    def _set_object(self, object_id, obj):
        self.dictionary[object_id] = dict(copy.deepcopy(obj))
        self._auto_commit()

    def _get_object(self, object_id):
        return copy.deepcopy(self.dictionary.get(object_id, None))

    def _delete_object(self, object_id):
        try:
            del self.dictionary[object_id]
            self._auto_commit()
        except KeyError:
            pass

    def _do_validate_id(self, object_id):
        if object_id not in self.dictionary:
            if isinstance(object_id, str):
                object_id = "'%s'" % object_id
            raise KeyError("invalid object_id %s" % str(object_id))

    @classmethod
    def _next_id(cls):
        return str(int(datetime.datetime.now().timestamp() * 1000000))

    def insert(self, obj):
        object_id = self.__class__._next_id()
        self._set_object(object_id, obj)
        return object_id

    def insert_multi(self, objects):
        return [self.insert(obj) for obj in objects]

    def get(self, object_id):
        return self._get_object(object_id)

    def get_multi(self, object_ids):
        return [self.get(object_id) for object_id in object_ids]

    def update(self, object_id, obj):
        self._do_validate_id(object_id)
        self._set_object(object_id, obj)
        return object_id

    def update_multi(self, object_ids, objects):
        if len(object_ids) != len(objects):
            raise ValueError("size of object_ids and objects must be the same")
        for object_id in object_ids:
            self._do_validate_id(object_id)
        for object_id, obj in zip(object_ids, objects):
            self._set_object(object_id, obj)
        return object_ids

    def update_or_insert(self, object_id, obj):
        self._set_object(object_id, obj)
        return object_id

    def update_or_insert_multi(self, object_ids, objects):
        if len(object_ids) != len(objects):
            raise ValueError("size of object_ids and objects must be the same")
        for object_id, obj in zip(object_ids, objects):
            self._set_object(object_id, obj)
        return object_ids

    def delete(self, object_id, ignore_exception=False):
        if not ignore_exception:
            self._do_validate_id(object_id)
        self._delete_object(object_id)

    def delete_multi(self, object_ids):
        for object_id in object_ids:
            self._do_validate_id(object_id)
        for object_id in object_ids:
            self._delete_object(object_id)

    def query(self, test_func=lambda obj: True):
        return Query(self.dictionary, test_func)


class Query(object):
    def __init__(self, dictionary, test_func=lambda obj: True):
        self.dictionary = dictionary
        self.test_func = test_func

    def fetch(self, ids_only=False):
        dictionary = copy.deepcopy(self.dictionary)
        if ids_only:
            return [object_id for object_id, obj in dictionary.items() if self.test_func(obj)]
        else:
            return [obj for obj in dictionary.values() if self.test_func(obj)]
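
# --- Usage sketch, not part of the original module ---
# A Table can be used standalone (without a Database/storage backend); ids are
# timestamp-based strings generated by _next_id(). The sample data is made up.
if __name__ == '__main__':
    table = Table('person')
    pid = table.insert({'name': 'Ada', 'age': 36})
    print(table.get(pid))                                     # {'name': 'Ada', 'age': 36}
    print(table.query(lambda obj: obj['age'] > 30).fetch())   # [{'name': 'Ada', 'age': 36}]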
py
1a448aa7c2c0ddd250222b4baaad8f74a00be1b0
# Copyright 2018 United States Government as represented by the Administrator of
# the National Aeronautics and Space Administration. No copyright is claimed in
# the United States under Title 17, U.S. Code. All Other Rights Reserved.

# The Stochastic Reduced Order Models with Python (SROMPy) platform is licensed
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0.

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
py
1a448be60afc5a5671243cc00e917ea387b52758
#!/usr/bin/env python ################################################################################# # Copyright 2018 ROBOTIS CO., LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################# # Authors: Gilbert # import rospy import numpy as np import math from math import pi import time from geometry_msgs.msg import Twist, Point, Pose from sensor_msgs.msg import LaserScan from nav_msgs.msg import Odometry from std_srvs.srv import Empty from tf.transformations import euler_from_quaternion, quaternion_from_euler from respawnGoal import Respawn class Env(): def __init__(self, action_size): self.goal_x = 0 self.goal_y = 0 self.heading = 0 self.action_size = action_size self.initGoal = True self.get_goalbox = False self.position = Pose() self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=2) self.sub_odom = rospy.Subscriber('odom', Odometry, self.getOdometry) self.reset_proxy = rospy.ServiceProxy('gazebo/reset_simulation', Empty) self.unpause_proxy = rospy.ServiceProxy('gazebo/unpause_physics', Empty) self.pause_proxy = rospy.ServiceProxy('gazebo/pause_physics', Empty) self.respawn_goal = Respawn() def getGoalDistace(self): goal_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y), 2) return goal_distance def getOdometry(self, odom): self.position = odom.pose.pose.position orientation = odom.pose.pose.orientation orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w] _, _, yaw = euler_from_quaternion(orientation_list) goal_angle = math.atan2(self.goal_y - self.position.y, self.goal_x - self.position.x) heading = goal_angle - yaw if heading > pi: heading -= 2 * pi elif heading < -pi: heading += 2 * pi self.heading = round(heading, 2) def getState(self, scan): scan_range = [] heading = self.heading min_range = 0.15 done = False for i in range(len(scan.ranges)): if scan.ranges[i] == float('Inf'): scan_range.append(3.5) elif np.isnan(scan.ranges[i]): scan_range.append(0) else: scan_range.append(scan.ranges[i]) obstacle_min_range = round(min(scan_range), 2) obstacle_angle = np.argmin(scan_range) rospy.loginfo("min_range:%s angle:%s scan_range:%s", obstacle_min_range,obstacle_angle, scan_range ) if min_range > min(scan_range) > 0: done = True current_distance = round(math.hypot(self.goal_x - self.position.x, self.goal_y - self.position.y), 2) if current_distance < 0.2: self.get_goalbox = True current_x = round(self.position.x,2) current_y = round(self.position.y,2) return [current_x,current_y,self.goal_x,self.goal_y,heading, current_distance, obstacle_min_range, obstacle_angle], done def setReward(self, state, done, action): yaw_reward = [] current_distance = state[-3] heading = state[-4] for i in range(5): angle = -pi / 4 + heading + (pi / 8 * i) + pi / 2 tr = 1 - 4 * math.fabs(0.5 - math.modf(0.25 + 0.5 * angle % (2 * math.pi) / math.pi)[0]) yaw_reward.append(tr) distance_rate = 2 ** (current_distance / self.goal_distance) reward = ((round(yaw_reward[action] * 5, 
2)) * distance_rate) #rospy.loginfo("yaw_reward :%s", yaw_reward ) if done: rospy.loginfo("Collision!!") reward = -150 self.pub_cmd_vel.publish(Twist()) if self.get_goalbox: rospy.loginfo("Goal!!") reward = 200 self.pub_cmd_vel.publish(Twist()) self.goal_x, self.goal_y = self.respawn_goal.getPosition(True, delete=True) self.goal_distance = self.getGoalDistace() self.get_goalbox = False return reward def control(self, action): #rospy.loginfo("action :%s", action ) act = int(action) vel_cmd = Twist() if act == 0 : #forward vel_cmd.linear.x = 0.2 self.pub_cmd_vel.publish(vel_cmd) elif act == 1 :#backward vel_cmd.linear.x = -0.2 self.pub_cmd_vel.publish(vel_cmd) elif act == 2 :#left vel_cmd.angular.z = pi/3 self.pub_cmd_vel.publish(vel_cmd) time.sleep(1) vel_cmd.linear.x = 0.2 vel_cmd.angular.z = 0.0 self.pub_cmd_vel.publish(vel_cmd) elif act == 3 :#right vel_cmd.angular.z = -pi/3 self.pub_cmd_vel.publish(vel_cmd) time.sleep(1) vel_cmd.linear.x = 0.2 vel_cmd.angular.z = 0.0 self.pub_cmd_vel.publish(vel_cmd) time.sleep(1) data = None while data is None: try: data = rospy.wait_for_message('scan', LaserScan, timeout=5) except: pass state, done = self.getState(data) reward = self.setReward(state, done, action) return np.asarray(state), reward, done def reset(self): rospy.wait_for_service('gazebo/reset_simulation') try: self.reset_proxy() except (rospy.ServiceException) as e: print("gazebo/reset_simulation service call failed") data = None while data is None: try: data = rospy.wait_for_message('scan', LaserScan, timeout=5) except: pass if self.initGoal: self.goal_x, self.goal_y = self.respawn_goal.getPosition() self.initGoal = False self.goal_distance = self.getGoalDistace() state, done = self.getState(data) return np.asarray(state).tolist()
py
1a448cb755aa154c4d1cb00784d3534f5b118245
#!/usr/bin/env python3
import os, sys

service = "[Unit]\n"\
          "Description={description}\n"\
          "After=network.target\n"\
          "StartLimitIntervalSec=0\n"\
          "\n"\
          "[Service]\n"\
          "Type=simple\n"\
          "Restart=always\n"\
          "RestartSec=1\n"\
          "User=root\n"\
          "ExecStart={exec}\n"\
          "\n"\
          "[Install]\n"\
          "WantedBy=multi-user.target"

name = False
desc = False
path = False
command = False

for arg in sys.argv:
    if "--name=" in arg:
        name = arg.split('=')[1] + ".service"
    if "--path=" in arg:
        path = arg.split('=')[1]
    if "--command=" in arg:
        command = arg.split('=')[1]
    if "--desc=" in arg:
        desc = arg.split('=')[1]
    if arg == "-h" or arg == "--help":
        print("Usage: python3 createservice.py [--name=NAME] [--path=PATH] [--command=COMMAND] [--desc=DESC]")
        exit(0)

if not name:
    name = input("Service name: ") + ".service"
if not path:
    path = input("Executable binary path: ")
if not command:
    command = input("Command and args: ")
if not desc:
    desc = input("Description: ")

service = service.replace("{description}", desc).replace("{exec}", path + " " + command)

f = open("/lib/systemd/system/" + name, "w")
f.write(service)
f.close()

print(service)
print()
print("Wrote to /lib/systemd/system/" + name)

os.system("systemctl enable " + name)
os.system("systemctl start " + name)
print("Started and enabled service.")
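
# --- Example invocation, not part of the original script ---
# The script can run non-interactively when all four flags are given; the
# values below are made up for illustration:
#
#   sudo python3 createservice.py --name=myapp --path=/usr/bin/python3 \
#       --command=/opt/myapp/main.py --desc="My app"
#
# which writes /lib/systemd/system/myapp.service with ExecStart set to
# "/usr/bin/python3 /opt/myapp/main.py", then enables and starts the unit.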
py
1a448d808be139254e3a743bc4f3f1ea79902398
import pytest

from dorfperfekt.tile import (
    InvalidTileDefinitionError,
    Terrain,
    string2tile,
    tile2string,
    validate_terrains,
    validate_tiles,
)


def test_tile():
    tile = string2tile("frdwtg")
    assert tile2string(tile) == "FRDWTG"
    assert tile2string(tile._replace(ori=0)) == "DWTGFR"
    assert tile2string(tile._replace(ori=-1)) == "WTGFRD"
    assert tile.ori == 2
    assert tile.terrains[0] is Terrain.DWELLING
    assert tile.terrains[2] is Terrain.TRAIN
    assert tile.terrains[(7 - tile.ori) % 6] is Terrain.RANCH


def test_from_letter():
    tile = string2tile("w")
    assert tile2string(tile) == "WWWWWW"


def test_equality():
    tile1 = string2tile("gfrrrr")
    tile2 = string2tile("frrrrg")
    assert tile1.terrains == tile2.terrains


def test_assertions():
    with pytest.raises(InvalidTileDefinitionError):
        string2tile("gr")

    with pytest.raises(InvalidTileDefinitionError):
        string2tile("k")


def test_validate_terrains():
    assert validate_terrains(Terrain.GRASS, Terrain.GRASS) == (True, True)
    assert validate_terrains(Terrain.GRASS, Terrain.COAST) == (True, True)
    assert validate_terrains(Terrain.GRASS, Terrain.WATER) == (False, False)
    assert validate_terrains(Terrain.WATER, Terrain.WATER) == (True, True)
    assert validate_terrains(Terrain.GRASS, Terrain.RANCH) == (True, False)
    assert validate_terrains(Terrain.WATER, Terrain.OPEN) == (True, None)


def test_validate_tiles():
    valid, perfect = validate_tiles(string2tile("dwwggr"), string2tile("cwrogd"))
    assert not valid and perfect is None

    valid, perfect = validate_tiles(string2tile("dwwggr"), string2tile("dcwrog"))
    assert valid and perfect == (True, True, True, False, None, False)
py
1a448e4645f4aa210308d414329e77b03ab0d70c
from distutils.core import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="GPGame",
    version="2020.0.2",
    author="Nishant Vikramaditya",
    author_email="[email protected]",
    description="An abstraction layer on the Kivy GPU accelerated engine.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Nv7-GitHub/GPGame",
    packages=["GPGame"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=["Kivy"]
)
py
1a448e640a2ec33f7896aaa31aa8a14befef1235
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging logger = logging.getLogger('bcdocs') class BaseStyle(object): def __init__(self, doc, indent_width=2): self.doc = doc self.indent_width = indent_width self._indent = 0 self.keep_data = True @property def indentation(self): return self._indent @indentation.setter def indentation(self, value): self._indent = value def new_paragraph(self): return '\n%s' % self.spaces() def indent(self): self._indent += 1 def dedent(self): if self._indent > 0: self._indent -= 1 def spaces(self): return ' ' * (self._indent * self.indent_width) def bold(self, s): return s def ref(self, link, title=None): return link def h2(self, s): return s def h3(self, s): return s def underline(self, s): return s def italics(self, s): return s class ReSTStyle(BaseStyle): def __init__(self, doc, indent_width=2): BaseStyle.__init__(self, doc, indent_width) self.do_p = True self.a_href = None self.list_depth = 0 def new_paragraph(self): self.doc.write('\n\n%s' % self.spaces()) def new_line(self): self.doc.write('\n%s' % self.spaces()) def _start_inline(self, markup): self.doc.write(markup) def _end_inline(self, markup): # Sometimes the HTML markup has whitespace between the end # of the text inside the inline markup and the closing element # (e.g. <b>foobar </b>). This trailing space will cause # problems in the ReST inline markup so we remove it here # by popping the last item written off the stack, striping # the whitespace and then pushing it back on the stack. last_write = self.doc.pop_write() self.doc.push_write(last_write.rstrip(' ')) self.doc.write(markup + ' ') def start_bold(self, attrs=None): self._start_inline('**') def end_bold(self): self._end_inline('**') def start_b(self, attrs=None): self.doc.do_translation = True self.start_bold(attrs) def end_b(self): self.doc.do_translation = False self.end_bold() def bold(self, s): if s: self.start_bold() self.doc.write(s) self.end_bold() def ref(self, title, link=None): if link is None: link = title self.doc.write(':doc:`%s <%s>`' % (title, link)) def _heading(self, s, border_char): border = border_char * len(s) self.new_paragraph() self.doc.write('%s\n%s\n%s' % (border, s, border)) self.new_paragraph() def h1(self, s): self._heading(s, '*') def h2(self, s): self._heading(s, '=') def h3(self, s): self._heading(s, '-') def start_italics(self, attrs=None): self._start_inline('*') def end_italics(self): self._end_inline('*') def italics(self, s): if s: self.start_italics() self.doc.write(s) self.end_italics() def start_p(self, attrs=None): if self.do_p: self.doc.write('\n\n%s' % self.spaces()) def end_p(self): if self.do_p: self.doc.write('\n\n%s' % self.spaces()) def start_code(self, attrs=None): self.doc.do_translation = True self._start_inline('``') def end_code(self): self.doc.do_translation = False self._end_inline('``') def code(self, s): if s: self.start_code() self.doc.write(s) self.end_code() def start_note(self, attrs=None): self.new_paragraph() self.doc.write('.. 
note::') self.indent() self.new_paragraph() def end_note(self): self.dedent() self.new_paragraph() def start_important(self, attrs=None): self.new_paragraph() self.doc.write('.. warning::') self.indent() self.new_paragraph() def end_important(self): self.dedent() self.new_paragraph() def start_danger(self, attrs=None): self.new_paragraph() self.doc.write('.. danger::') self.indent() self.new_paragraph() def end_danger(self): self.dedent() self.new_paragraph() def start_a(self, attrs=None): if attrs: for attr_key, attr_value in attrs: if attr_key == 'href': self.a_href = attr_value self.doc.write('`') else: # There are some model documentation that # looks like this: <a>DescribeInstances</a>. # In this case we just write out an empty # string. self.doc.write(' ') self.doc.do_translation = True def link_target_definition(self, refname, link): self.doc.writeln('.. _%s: %s' % (refname, link)) def sphinx_reference_label(self, label, text=None): if text is None: text = label if self.doc.target == 'html': self.doc.write(':ref:`%s <%s>`' % (text, label)) else: self.doc.write(text) def end_a(self): self.doc.do_translation = False if self.a_href: last_write = self.doc.pop_write() last_write = last_write.rstrip(' ') if last_write and last_write != '`': if ':' in last_write: last_write = last_write.replace(':', r'\:') self.doc.push_write(last_write) self.doc.push_write(' <%s>`__' % self.a_href) elif last_write == '`': # Look at start_a(). It will do a self.doc.write('`') # which is the start of the link title. If that is the # case then there was no link text. We should just # use an inline link. The syntax of this is # `<http://url>`_ self.doc.push_write('`<%s>`__' % self.a_href) else: self.doc.push_write(self.a_href) self.doc.hrefs[self.a_href] = self.a_href self.doc.write('`__') self.a_href = None self.doc.write(' ') def start_i(self, attrs=None): self.doc.do_translation = True self.start_italics() def end_i(self): self.doc.do_translation = False self.end_italics() def start_li(self, attrs=None): self.new_line() self.do_p = False self.doc.write('* ') def end_li(self): self.do_p = True self.new_line() def li(self, s): if s: self.start_li() self.doc.writeln(s) self.end_li() def start_ul(self, attrs=None): if self.list_depth != 0: self.indent() self.list_depth += 1 self.new_paragraph() def end_ul(self): self.list_depth -= 1 if self.list_depth != 0: self.dedent() self.new_paragraph() def start_ol(self, attrs=None): # TODO: Need to control the bullets used for LI items if self.list_depth != 0: self.indent() self.list_depth += 1 self.new_paragraph() def end_ol(self): self.list_depth -= 1 if self.list_depth != 0: self.dedent() self.new_paragraph() def start_examples(self, attrs=None): self.doc.keep_data = False def end_examples(self): self.doc.keep_data = True def start_fullname(self, attrs=None): self.doc.keep_data = False def end_fullname(self): self.doc.keep_data = True def start_codeblock(self, attrs=None): self.doc.write('::') self.indent() self.new_paragraph() def end_codeblock(self): self.dedent() self.new_paragraph() def codeblock(self, code): """ Literal code blocks are introduced by ending a paragraph with the special marker ::. The literal block must be indented (and, like all paragraphs, separated from the surrounding ones by blank lines). """ self.start_codeblock() self.doc.writeln(code) self.end_codeblock() def toctree(self): if self.doc.target == 'html': self.doc.write('\n.. 
toctree::\n') self.doc.write(' :maxdepth: 1\n') self.doc.write(' :titlesonly:\n\n') else: self.start_ul() def tocitem(self, item, file_name=None): if self.doc.target == 'man': self.li(item) else: if file_name: self.doc.writeln(' %s' % file_name) else: self.doc.writeln(' %s' % item) def hidden_toctree(self): if self.doc.target == 'html': self.doc.write('\n.. toctree::\n') self.doc.write(' :maxdepth: 1\n') self.doc.write(' :hidden:\n\n') def hidden_tocitem(self, item): if self.doc.target == 'html': self.tocitem(item) def table_of_contents(self, title=None, depth=None): self.doc.write('.. contents:: ') if title is not None: self.doc.writeln(title) if depth is not None: self.doc.writeln(' :depth: %s' % depth) def start_sphinx_py_class(self, class_name): self.new_paragraph() self.doc.write('.. py:class:: %s' % class_name) self.indent() self.new_paragraph() def end_sphinx_py_class(self): self.dedent() self.new_paragraph() def start_sphinx_py_method(self, method_name, parameters=None): self.new_paragraph() content = '.. py:method:: %s' % method_name if parameters is not None: content += '(%s)' % parameters self.doc.write(content) self.indent() self.new_paragraph() def end_sphinx_py_method(self): self.dedent() self.new_paragraph() def start_sphinx_py_attr(self, attr_name): self.new_paragraph() self.doc.write('.. py:attribute:: %s' % attr_name) self.indent() self.new_paragraph() def end_sphinx_py_attr(self): self.dedent() self.new_paragraph() def write_py_doc_string(self, docstring): docstring_lines = docstring.splitlines() for docstring_line in docstring_lines: self.doc.writeln(docstring_line) def external_link(self, title, link): if self.doc.target == 'html': self.doc.write('`%s <%s>`_' % (title, link)) else: self.doc.write(title)
py
1a448ec802754f313a500e2ecf6595ce2aa1a345
from django.apps import AppConfig


class NotificationsConfig(AppConfig):
    name = 'toss.notifications'
py
1a448f74c1fa75348c2096a2bbe0e02103c24a1b
import tempfile, time, sys

import pymailer

f = tempfile.NamedTemporaryFile('r+t', suffix='.html', delete=True)
f.write('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\
<html lang="fr">\
<head>\
<meta http-equiv="content-type" content="text/html;charset=utf-8" />\
</head>\
<body>\
<p>The computer has been turned on, on ')
#print(f.name + ' created')

arg = ['-s', f.name, '/home/romain/git/python-mailer/recipients.csv', 'Computer Turned On']

f.write((time.strftime("%A %d %B %Y, %H:%M:%S")))

f.write('.</p>\
</body>\
</html>')

f.seek(0)  # return to beginning of file

pymailer.main(arg)

f.close()  # temporary file is automatically deleted here
py
1a448f805c75649ab174404af989b1705d09a6b0
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm

from users.models import User, Issue


class UserRegisterForm(UserCreationForm):
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['name', 'phone', 'dob', 'email', 'password1', 'password2', 't_c']


class UserUpdateForm(forms.ModelForm):
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ['name', 'phone', 'dob', 'email', 'image']


class IssueForm(forms.ModelForm):
    class Meta:
        model = Issue
        fields = ['issue_head', 'issue_body']


class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
    def confirm_login_allowed(self, user):
        pass
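
# --- Usage sketch, not part of the original module ---
# AuthenticationFormWithInactiveUsersOkay overrides confirm_login_allowed with a
# no-op, so inactive accounts can authenticate. Hypothetical wiring in a URLconf
# (shown only to illustrate the form's purpose; not taken from this project):
#
# from django.contrib.auth.views import LoginView
# path('login/', LoginView.as_view(
#     authentication_form=AuthenticationFormWithInactiveUsersOkay))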
py
1a44900dc5527649dfb73da64cf59e91a9b4ad28
# Copyright (c) 2004 Divmod. # See LICENSE for details. import urllib.request, urllib.parse, urllib.error, warnings from twisted.python import log, failure from nevow import util from nevow.stan import directive, Unset, invisible, _PrecompiledSlot from nevow.inevow import ICanHandleException, IData, IMacroFactory, IRenderer, IRendererFactory from nevow.flat import precompile, serialize from nevow.accessors import convertToData from nevow.context import WovenContext from nevow.util import toBytes, unicode allowSingleton = ('img', 'br', 'hr', 'base', 'meta', 'link', 'param', 'area', 'input', 'col', 'basefont', 'isindex', 'frame') def ProtoSerializer(original, context): return '<%s />' % original def _datacallback(result, context): context.remember(result, IData) return '' def TagSerializer(original, context, contextIsMine=False): """ Original is the tag. Context is either: - the context of someone up the chain (if contextIsMine is False) - this tag's context (if contextIsMine is True) """ # print "TagSerializer:",original, "ContextIsMine",contextIsMine, "Context:",context visible = bool(original.tagName) if visible and context.isAttrib: raise RuntimeError("Tried to render tag '%s' in an tag attribute context." % (original.tagName)) if context.precompile and original.macro: toBeRenderedBy = original.macro ## Special case for directive; perhaps this could be handled some other way with an interface? if isinstance(toBeRenderedBy, directive): toBeRenderedBy = IMacroFactory(context).macro(context, toBeRenderedBy.name) original.macro = Unset newContext = WovenContext(context, original) yield serialize(toBeRenderedBy(newContext), newContext) return ## TODO: Do we really need to bypass precompiling for *all* specials? ## Perhaps just render? if context.precompile and ( [x for x in list(original._specials.values()) if x is not None and x is not Unset] or original.slotData): ## The tags inside this one get a "fresh" parent chain, because ## when the context yielded here is serialized, the parent ## chain gets reconnected to the actual parents at that ## point, since the render function here could change ## the actual parentage hierarchy. nestedcontext = WovenContext(precompile=context.precompile, isAttrib=context.isAttrib) # If necessary, remember the MacroFactory onto the new context chain. macroFactory = IMacroFactory(context, None) if macroFactory is not None: nestedcontext.remember(macroFactory, IMacroFactory) original = original.clone(deep=False) if not contextIsMine: context = WovenContext(context, original) context.tag.children = precompile(context.tag.children, nestedcontext) yield context return ## Don't render patterns if original.pattern is not Unset and original.pattern is not None: return if not contextIsMine: if original.render: ### We must clone our tag before passing to a render function original = original.clone(deep=False) context = WovenContext(context, original) if original.data is not Unset: newdata = convertToData(original.data, context) if isinstance(newdata, util.Deferred): yield newdata.addCallback(lambda newdata: _datacallback(newdata, context)) else: _datacallback(newdata, context) if original.render: ## If we have a render function we want to render what it returns, ## not our tag toBeRenderedBy = original.render # erase special attribs so if the renderer returns the tag, # the specials won't be on the context twice. 
original._clearSpecials() yield serialize(toBeRenderedBy, context) return if not visible: for child in original.children: yield serialize(child, context) return yield '<%s' % original.tagName if original.attributes: attribContext = WovenContext(parent=context, precompile=context.precompile, isAttrib=True) for (k, v) in sorted(original.attributes.items()): if v is None: continue yield ' %s="' % k yield serialize(v, attribContext) yield '"' if not original.children: if original.tagName in allowSingleton: yield ' />' else: yield '></%s>' % original.tagName else: yield '>' for child in original.children: yield serialize(child, context) yield '</%s>' % original.tagName def EntitySerializer(original, context): if original.name in ['amp', 'gt', 'lt', 'quot']: return '&%s;' % original.name return '&#%s;' % original.num def _jsSingleQuoteQuote(quotable): return quotable.replace( "\\", "\\\\").replace( "'", r"\'").replace( "\n", "\\n").replace( "\r", "\\r") def RawSerializer(original, context): if context.inJSSingleQuoteString: return _jsSingleQuoteQuote(original) return original def StringSerializer(original, context): # Quote the string as necessary. URLs need special quoting - only # alphanumeric and a few punctation characters are valid. # Otherwise we use normal XML escaping rules but also replacing " # in an attribute because Nevow always uses "..." for values. original=toBytes(original) if context.inURL: # The magic string "-_.!*'()" also appears in url.py. Thinking about # changing this? Change that, too. return urllib.parse.quote(original, safe="-_.!*'()") ## quote it if context.inJS: original = _jsSingleQuoteQuote(original) if not context.inJSSingleQuoteString: original = b"'%s'" % (original, ) if context.isAttrib: return original.replace(b"&", b"&amp;").replace(b"<", b"&lt;").replace(b">", b"&gt;").replace(b'"', b"&quot;") elif context.inJS: return original else: return original.replace(b"&", b"&amp;").replace(b"<", b"&lt;").replace(b">", b"&gt;") def NoneWarningSerializer(original, context): if context.isAttrib: ## We don't want the big red None warning inside a html attribute. Just leave it blank. 
return b'' elif context.inURL: return b'' elif context.inJS: return b'' return b'<span style="font-size: xx-large; font-weight: bold; color: red; border: thick solid red;">None</span>' def StringCastSerializer(original, context): if context.inJS: return str(original) return StringSerializer(str(original), context) def BooleanSerializer(original, context): if context.inJS: if original: return b'true' return b'false' return str(original) def ListSerializer(original, context): for item in original: yield serialize(item, context) def XmlSerializer(original, context): return original.content PASS_SELF = object() def FunctionSerializer_nocontext(original): code = getattr(original, 'func_code', None) if code is None: return True argcount = code.co_argcount if argcount == 1: return True if argcount == 3: return PASS_SELF return False def FunctionSerializer(original, context, nocontextfun=FunctionSerializer_nocontext): if context.precompile: return WovenContext(tag=invisible(render=original)) else: data = convertToData(context.locate(IData), context) try: nocontext = nocontextfun(original) if nocontext is True: if hasattr(original, '__code__') and (original.__code__.co_argcount == 3 or ( original.__code__.co_argcount == 2 and original.__code__.co_varnames[0] != 'self')): result = original(context, data) else: result = original(data) else: if nocontext is PASS_SELF: renderer = context.locate(IRenderer) result = original(renderer, context, data) else: result = original(context, data) except StopIteration: raise RuntimeError("User function %r raised StopIteration." % original) return serialize(result, context) def MethodSerializer(original, context): def nocontext(original): func = getattr(original, 'im_func', None) code = getattr(func, 'func_code', None) return code is None or code.co_argcount == 2 return FunctionSerializer(original, context, nocontext) def RendererSerializer(original, context): def nocontext(original): func = getattr(original, 'im_func', None) code = getattr(func, 'func_code', None) return code is None or code.co_argcount == 2 return FunctionSerializer(original.rend, context, nocontext) def DirectiveSerializer(original, context): if context.precompile: return original rendererFactory = context.locate(IRendererFactory) renderer = rendererFactory.renderer(context, original.name) return serialize(renderer, context) def SlotSerializer(original, context): """ Serialize a slot. If the value is already available in the given context, serialize and return it. Otherwise, if this is a precompilation pass, return a new kind of slot which captures the current render context, so that any necessary quoting may be performed. Otherwise, raise an exception indicating that the slot cannot be serialized. """ if context.precompile: try: data = context.locateSlotData(original.name) except KeyError: return _PrecompiledSlot( original.name, precompile(original.children, context), original.default, context.isAttrib, context.inURL, context.inJS, context.inJSSingleQuoteString, original.filename, original.lineNumber, original.columnNumber) else: return serialize(data, context) try: data = context.locateSlotData(original.name) except KeyError: if original.default is None: raise data = original.default return serialize(data, context) def PrecompiledSlotSerializer(original, context): """ Serialize a pre-compiled slot. Return the serialized value of the slot or raise a KeyError if it has no value. 
""" # Precompilation should _not_ be happening at this point, but Nevow is very # sloppy about precompiling multiple times, so sometimes we are in a # precompilation context. In this case, there is nothing to do, just # return the original object. The case which seems to exercise this most # often is the use of a pattern as the stan document given to the stan # loader. The pattern has already been precompiled, but the stan loader # precompiles it again. This case should be eliminated by adding a loader # for precompiled documents. if context.precompile: warnings.warn( "[v0.9.9] Support for multiple precompilation passes is deprecated.", PendingDeprecationWarning) return original try: data = context.locateSlotData(original.name) except KeyError: if original.default is None: raise data = original.default originalContext = context.clone(deep=False) originalContext.isAttrib = original.isAttrib originalContext.inURL = original.inURL originalContext.inJS = original.inJS originalContext.inJSSingleQuoteString = original.inJSSingleQuoteString return serialize(data, originalContext) def ContextSerializer(original, context): """ Serialize the given context's tag in that context. """ originalContext = original.clone(deep=False) originalContext.precompile = context and context.precompile or False if originalContext.parent is not None: originalContext.parent = originalContext.parent.clone(cloneTags=False) originalContext.chain(context) try: return TagSerializer(originalContext.tag, originalContext, contextIsMine=True) except: f = failure.Failure() handler = context.locate(ICanHandleException) if handler: return handler.renderInlineError(context, f) else: log.err(f) return """<div style="border: 1px dashed red; color: red; clear: both">[[ERROR]]</div>""" def CommentSerializer(original, context): yield "<!--" for x in original.children: yield serialize(x, context) yield "-->" def DocFactorySerializer(original, ctx): """Serializer for document factories. """ return serialize(original.load(ctx), ctx) def FailureSerializer(original, ctx): from nevow import failure return serialize(failure.formatFailure(original), ctx)
py
1a4490d70e9466b7eaeb9cb9df35db6a487b41b1
#!/usr/bin/env python3 ################################################################################# # The MIT License (MIT) # # Copyright (c) 2015, George Webster. All rights reserved. # # Approved for Public Release; Distribution Unlimited 14-1511 # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ################################################################################# import argparse import configparser import csv import logging import requests import sys import time from collections import namedtuple from itertools import islice def submit_crits(domain, cfg): """ Submits domain to CRITs """ headers = {'User-agent': 'benign_domains'} # submit domain url = "{0}/api/v1/domains/".format(cfg['crits'].get('url')) params = { 'api_key': cfg['crits'].get('key'), 'username': cfg['crits'].get('user'), 'source': cfg['crits'].get('source'), 'domain': domain } try: response = requests.post(url, headers=headers, data=params, verify=False) if response.status_code == requests.codes.ok: response_json = response.json() logging.info("\tSubmitted domain info for {0} to Crits, response was {1}".format(domain, response_json.get('message', ''))) except: logging.info("Exception caught from Crits when submitting domain {0}".format(domain)) def check_virustotal(domain, api_key, threshold): """ Checks VirusTotal to see if the domain is malicious """ #resource = "{0}domain".format("http://www.", domain) url = 'https://www.virustotal.com/vtapi/v2/url/report' params = {'resource': domain, 'apikey': api_key, 'allinfo': 1} try: response = requests.get(url, params=params) if response.status_code == requests.codes.ok: response_json = response.json() logging.info("\tSubmitted domain {0} to VirusTotal for verification, response was {1}".format(domain, response_json.get('verbose_msg', ''))) if response_json['response_code'] == 0: logging.info("\tVT: Has not seen {0} before, assuming domain is benign".format(domain)) return True elif response_json['response_code'] == -1: logging.debug("\tVT: Reporting that domain {0} is malformed, assuming malicious".format(domain)) return False elif response_json['response_code'] == 1: total = int(response_json.get('total', 0)) positive = int(response_json.get('positives', 0)) additionalinfo = response_json.get('additional_info', '') if additionalinfo: logging.info("\tVT: Category is: {0}".format(additionalinfo.get('categories', ''))) logging.info("\tVT: Positive scans: {0} out of {1} total scans".format(positive, total)) if positive > int(threshold): logging.info("\tVT: Threshold exceeded, 
skipping domain")
                    return False
                else:
                    logging.info("\tVT: Under threshold, domain is benign")
                    return True
    except:
        logging.debug("Exception caught from VirusTotal when receiving report")
        return False


def setup_cli(args, cfg):
    """ Configure command-line arguments """

    description = """
Benign_domains outputs a list of perceived benign domains. This is
intended to help gather data for ML training sets and generate white
lists. The core set of domains are provided by majestic million.

Options:
- Validate domains against VirusTotal's datasets (in progress)
- Submit domains to a CRITs instance
- Output to a file"""

    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument('-s', '--start',
                        action='store',
                        default=cfg['benign'].get('startDomain', fallback='0'),
                        dest='start',
                        type=int,
                        help='Define starting domain rank number. Overrides config file')
    parser.add_argument('-e', '--end',
                        action='store',
                        default=cfg['benign'].get('endDomain', fallback='200'),
                        dest='end',
                        type=int,
                        help='Define ending domain rank number. Overrides config file')

    return parser.parse_args(args)


def main():
    """ Main logic for program """
    print("Starting up benign_domain parsing script!!!")

    # Read configuration file
    cfg = configparser.ConfigParser()
    cfg.read('benign.cfg')

    # Set up CLI interface
    args = setup_cli(sys.argv[1:], cfg)

    # Set up logging functionality
    logfile = cfg['logging'].get('filename', fallback='benign.log')
    level = cfg['logging'].get('level', fallback='INFO').upper()
    logformat = '%(asctime)s %(message)s'
    logging.basicConfig(filename=logfile, level=level, format=logformat)
    print("Writing to log file {0} at level {1}.".format(logfile, level))

    inputFile = cfg['inputFile'].get('majestic', fallback='majestic_million.csv')
    print("Opening input file {0}.".format(inputFile))

    print("Starting processing at domain {0}".format(args.start))
    print("Ending processing at domain {0}".format(args.end))

    if cfg['benign'].getboolean('outputFile', fallback=True):
        outputFile = cfg['outputFile'].get('filename', fallback='benign.domains')
        print("Saving output to file {0}.".format(outputFile))

    if cfg['benign'].getboolean('submitToCrits', fallback=False):
        url = cfg['crits'].get('url', '')
        username = cfg['crits'].get('user', '')
        source = cfg['crits'].get('source', '')
        print("Submitting domains to CRITs at: \n\tURL: {0}\n\tUser: {1}\n\tSource: {2}".format(url, username, source))

    # Quick checks before entering the loop
    if args.start == 0:
        args.start = 1
    if args.start > args.end:
        print("Starting # must not be greater than ending #.\nExiting")
        sys.exit()
    if int(cfg['virustotal'].get('threshold', 0)) < 1:
        print("Threshold must be greater than 0, setting to 1")
        # configparser option values must be strings
        cfg['virustotal']['threshold'] = '1'

    print("\nResults:\n--------------------------------------------------------------")
    with open(inputFile) as infile:
        f_csv = csv.reader(infile)
        headings = next(f_csv)
        Row = namedtuple('Row', headings)

        for r in islice(f_csv, args.start - 1, args.end):
            row = Row(*r)

            print("Processing domain: {0} at position: {1}".format(row.Domain, f_csv.line_num - 1))
            logging.info("Processing domain: {0} at position: {1}".format(row.Domain, f_csv.line_num - 1))

            if cfg['benign'].getboolean('checkVirustotal', fallback=False):
                if not check_virustotal(row.Domain, cfg['virustotal'].get('key'), cfg['virustotal'].get('threshold')):
                    continue

            if cfg['benign'].getboolean('outputFile', fallback=True):
                outputFile = cfg['outputFile'].get('filename', fallback='benign.domains')
                logging.info("\tWriting domain {0} to 
file {1}".format(row.Domain, outputFile)) with open(outputFile, 'at') as f: f.write(row.Domain + "\n") #print(row.Domain, file=f) if cfg['benign'].getboolean('submitToCrits', fallback=False): submit_crits(row.Domain, cfg) time.sleep(float(cfg['benign'].get('wait', fallback='1.0'))) if __name__ == "__main__": try: main() except KeyboardInterrupt: sys.exit()
py
1a44918212849aabba61bed919bda3e2eee183f3
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    1. Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
#    3. Neither the name of Enso nor the names of its contributors may
#       be used to endorse or promote products derived from this
#       software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ----------------------------------------------------------------------------
#
#   enso
#
# ----------------------------------------------------------------------------

import logging


class EventResponderList(object):
    """
    Behaves like a dictionary with limited functionality. When it becomes
    non-empty, an event handler is registered for a particular event and
    called whenever the event occurs. When it is empty, the event handler is
    unregistered and will not be called until it becomes non-empty again.
    """

    def __init__(self, eventManager, eventName, responderFunc):
        self.__eventManager = eventManager
        self.__eventName = eventName
        self.__responderFunc = responderFunc
        self.__isRegistered = False
        self.__items = {}

    def __setitem__(self, key, value):
        """
        Store an item under the given key and update the event registration.
        """
        self.__items[key] = value
        self.__onItemsChanged()

    def __delitem__(self, key):
        del self.__items[key]
        self.__onItemsChanged()

    def __iter__(self):
        for key, item in self.__items.items():
            yield key, item

    def __onItemsChanged(self):
        if self.__items and (not self.__isRegistered):
            assert logging.debug(
                "Registering EventResponderList for %s event"
                % self.__eventName) or True
            self.__eventManager.registerResponder(
                self.__responderFunc,
                self.__eventName
            )
            self.__isRegistered = True
        elif self.__isRegistered and (not self.__items):
            assert logging.debug(
                "Removing EventResponderList for %s event"
                % self.__eventName) or True
            self.__eventManager.removeResponder(self.__responderFunc)
            self.__isRegistered = False

    def fromlist(self, lst):
        self.__items = dict((id(item), item) for item in lst)
        self.__onItemsChanged()

    def clear(self):
        self.__items.clear()
        self.__onItemsChanged()
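# --- Illustrative usage sketch (editor's addition, hypothetical names) ---
# The list registers its responder with the event manager only while it holds
# items, and unregisters it again once it is emptied. Roughly:
#
#     responders = EventResponderList(eventManager, "onTimer", onTimerTick)
#     responders[id(widget)] = widget      # first item -> responder registered
#     for key, item in responders:         # iterate (key, item) pairs
#         ...
#     responders.clear()                   # empty again -> responder removed
#
# 'eventManager', 'onTimerTick' and 'widget' are placeholders; any object with
# registerResponder()/removeResponder() methods would do.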
py
1a4491a53a054b6886b82aa83a8f19d249db133b
""" WSGI config for proyectoprincipal project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proyectoprincipal.settings') application = get_wsgi_application()