/LASExplanation-0.0.1.tar.gz/LASExplanation-0.0.1/README.md
# LAS

LAS is a brief wrapper toolkit built on top of two explanation packages: LIME and SHAP. It provides two explainers, LIMEBAG and SHAP, which take data and fitted models as input and return explanations of feature importance ranks and/or weights (i.e., which attributes matter most within the prediction model).

## rq1.py

This demo runs LIMEBAG on a default dataset. It generates and presents explanations of feature importance ranks and weights for all test data points. It can be called via `LIMEBAG.demo1()`.

## rq2.py

This demo uses the explanations returned by LIMEBAG to run an effect size test. A summary of feature importance ranks and weights is generated and presented as output. It can be called via `LIMEBAG.demo2()`.
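A minimal usage sketch based on the two demo calls named above; the import path is inferred from the package name `LASExplanation` and is an assumption, as is the zero-argument signature of the demos (the README says they run on a default dataset):

```python
# Sketch only: import path inferred from the package name LASExplanation.
from LASExplanation import LIMEBAG

LIMEBAG.demo1()  # feature-importance ranks/weights for all test data points
LIMEBAG.demo2()  # effect size test over the explanations returned by LIMEBAG
```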
/Mesa-Geo-0.5.0.tar.gz/Mesa-Geo-0.5.0/mesa_geo/visualization/templates/js/MapModule.js
const MapModule = function (view, zoom, map_width, map_height, tiles, scale_options) {
    // Create the map tag
    const map_tag = document.createElement("div");
    map_tag.style.width = map_width + "px";
    map_tag.style.height = map_height + "px";
    map_tag.style.border = "1px dotted";
    map_tag.id = "mapid"
    const customView = (view !== null && zoom !== null)

    // Append it to #elements
    const elements = document.getElementById("elements");
    elements.appendChild(map_tag);

    // Create Leaflet map and Agent layers
    const Lmap = L.map('mapid', {zoomSnap: 0.1})
    if (customView) {
        Lmap.setView(view, zoom)
    }
    if (scale_options !== null) {
        L.control.scale(scale_options).addTo(Lmap)
    }
    let agentLayer = L.geoJSON().addTo(Lmap)

    // create tile layer
    if (tiles !== null) {
        if (tiles.kind === "raster_web_tile") {
            L.tileLayer(tiles.url, tiles.options).addTo(Lmap)
        } else if (tiles.kind === "wms_web_tile") {
            L.tileLayer.wms(tiles.url, tiles.options).addTo(Lmap)
        } else {
            throw new Error("Unknown tile type: " + tiles.kind)
        }
    }

    let hasFitBounds = false
    this.renderLayers = function (layers) {
        layers.rasters.forEach(function (layer) {
            L.imageOverlay(layer, layers.total_bounds).addTo(Lmap)
        })
        layers.vectors.forEach(function (layer) {
            L.geoJSON(layer).addTo(Lmap)
        })
        if (!hasFitBounds && !customView && layers.total_bounds.length !== 0) {
            Lmap.fitBounds(layers.total_bounds)
            hasFitBounds = true
        }
    }

    this.renderAgents = function (agents) {
        agentLayer.remove()
        agentLayer = L.geoJSON(agents, {
            onEachFeature: PopUpProperties,
            style: function (feature) {
                return feature.properties.style
            },
            pointToLayer: function (feature, latlang) {
                return L.circleMarker(latlang, feature.properties.pointToLayer);
            }
        }).addTo(Lmap)
    }

    this.render = function (data) {
        this.renderLayers(data.layers)
        this.renderAgents(data.agents)
    }

    this.reset = function () {
        agentLayer.remove()
    }
}

function PopUpProperties(feature, layer) {
    let popupContent = '<table>'
    if (feature.properties.popupProperties) {
        for (const p in feature.properties.popupProperties) {
            popupContent += '<tr><td>' + p + '</td><td>' + feature.properties.popupProperties[p] + '</td></tr>'
        }
    }
    popupContent += '</table>'
    layer.bindPopup(popupContent)
}
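For orientation, a sketch of the payload that `render(data)` consumes, reconstructed from the property accesses in the JavaScript above. Only the key names come from the code; all values below are illustrative assumptions:

```python
# Hypothetical payload for MapModule.render(data), inferred from the property
# accesses above (layers.rasters, layers.vectors, layers.total_bounds, agents).
data = {
    "layers": {
        "rasters": ["data:image/png;base64,..."],            # image overlay URLs
        "vectors": [{"type": "FeatureCollection", "features": []}],
        "total_bounds": [[39.8, -75.3], [40.1, -74.9]],      # [[south, west], [north, east]]
    },
    # GeoJSON; per-feature style, popup rows, and circle-marker options are
    # read from feature.properties by renderAgents/PopUpProperties
    "agents": {"type": "FeatureCollection", "features": []},
}
```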
/Jalapeno-Lite-0.1.3.tar.gz/Jalapeno-Lite-0.1.3/Jalapeno/lib/siteMgr.py
import os
import pickle

from Jalapeno.lib.pardir import *

APP_DIR = no_dot(par_dir())
SITES_FOLDER = APP_DIR + os.sep + 'Jalapeno_data' + os.sep + 'Sites'


class Site():

    def __init__(self, sitename):
        self.sitename = sitename

    @staticmethod
    def site_create(sitename):
        base_dir = SITES_FOLDER
        sitefolder = os.path.join(base_dir, sitename)
        subdir = {'Pages': None,
                  'build': None,
                  '_config': ['_config.yaml', 'flask_settings.py', 'profile.yaml'],
                  'source': ['image', 'extension']}
        if not os.path.exists(sitefolder):
            print("creating '%s' site folder" % sitename)
            os.mkdir(sitefolder)
            os.mkdir(sitefolder + os.sep + 'Pages')
            os.mkdir(sitefolder + os.sep + 'Pages' + os.sep + 'Draft')
            os.mkdir(sitefolder + os.sep + 'build')
            os.mkdir(sitefolder + os.sep + 'source')
            os.mkdir(sitefolder + os.sep + 'source' + os.sep + 'image')
            os.mkdir(sitefolder + os.sep + 'source' + os.sep + 'extension')
            os.mkdir(sitefolder + os.sep + '_config')
            config_folder = sitefolder + os.sep + '_config' + os.sep
            # create the empty config files, then fill in the Flask settings
            for each in subdir['_config']:
                f = open(config_folder + each, 'w')
                f.close()
            f = open(config_folder + 'flask_settings.py', 'w')
            f.write(temp())
            f.close()

    @staticmethod
    def site_switch(sitename):
        with open(SITES_FOLDER + os.sep + '.siterc', 'rb') as g:
            sitelist, site = pickle.load(g)
        if sitename not in sitelist:
            print('Site does not exist')
            return False
        with open(SITES_FOLDER + os.sep + '.siterc', 'wb') as f:
            pickle.dump((sitelist, sitename), f)
        print("Current site is '%s'" % sitename)
        return True

    @staticmethod
    def site_list_add(sitename):
        try:
            with open(SITES_FOLDER + os.sep + '.siterc', 'rb') as g:
                sitelist, site = pickle.load(g)
            sitelist.append(sitename)
            with open(SITES_FOLDER + os.sep + '.siterc', 'wb') as f:
                pickle.dump((sitelist, site), f)
        except (OSError, pickle.PickleError):
            print('site_list_add failed')

    @staticmethod
    def get_site():
        with open(SITES_FOLDER + os.sep + '.siterc', 'rb') as g:
            sitelist, sitename = pickle.load(g)
        if sitename not in sitelist:
            print('Site does not exist')
            return
        return sitename


SITE_DIR = SITES_FOLDER + os.sep + Site.get_site()


def temp():
    temp = '''
import os
from Jalapeno.path import SITE_DIR
from Jalapeno.lib.jalop_markdown import Jalop_markdown

DEBUG = True
THREADED = True

IMAGE_DIR = SITE_DIR+os.sep+'source'+os.sep+'image'
JS_EXTENSION_DIR = SITE_DIR+os.sep+'source'+os.sep+'extension'

def parent_dir(path):
    return os.path.abspath(os.path.join(path,os.pardir))

PROJECT_ROOT = SITE_DIR+os.sep+'build'
FREEZER_DESTINATION = PROJECT_ROOT
FREEZER_REMOVE_EXTRA_FILES = False
'''
    return temp
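The `.siterc` layout implied by the `pickle.load`/`pickle.dump` calls above is a pickled `(sitelist, current_sitename)` tuple. The bootstrap snippet below is an assumption about how the file could be seeded on a fresh install, not code from the package:

```python
import os
import pickle

# .siterc holds a pickled (sitelist, current_sitename) tuple.
# Hypothetical bootstrap for a fresh install:
siterc = SITES_FOLDER + os.sep + '.siterc'
if not os.path.exists(siterc):
    with open(siterc, 'wb') as f:
        pickle.dump((['default'], 'default'), f)

with open(siterc, 'rb') as f:
    sitelist, current = pickle.load(f)
print(sitelist, current)
```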
/Azimuth-2.0.tar.gz/Azimuth-2.0/azimuth/corrstats.py
from __future__ import division

__author__ = 'psinger'

import numpy as np
from scipy.stats import t, norm
from math import atanh, pow
from numpy import tanh


def rz_ci(r, n, conf_level=0.95):
    zr_se = pow(1/(n - 3), .5)
    moe = norm.ppf(1 - (1 - conf_level)/float(2)) * zr_se
    zu = atanh(r) + moe
    zl = atanh(r) - moe
    return tanh((zl, zu))


def rho_rxy_rxz(rxy, rxz, ryz):
    num = (ryz-1/2.*rxy*rxz)*(1-pow(rxy,2)-pow(rxz,2)-pow(ryz,2))+pow(ryz,3)
    den = (1 - pow(rxy,2)) * (1 - pow(rxz,2))
    return num/float(den)


def dependent_corr(xy, xz, yz, n, twotailed=True, conf_level=0.95, method='steiger'):
    """
    Calculates the statistical significance between two dependent correlation coefficients
    @param xy: correlation coefficient between x and y
    @param xz: correlation coefficient between x and z
    @param yz: correlation coefficient between y and z
    @param n: number of elements in x, y and z
    @param twotailed: whether to calculate a one or two tailed test, only works for 'steiger' method
    @param conf_level: confidence level, only works for 'zou' method
    @param method: defines the method used, 'steiger' or 'zou'
    @return: t and p-val
    """
    if method == 'steiger':
        d = xy - xz
        determin = 1 - xy * xy - xz * xz - yz * yz + 2 * xy * xz * yz
        av = (xy + xz)/2
        cube = (1 - yz) * (1 - yz) * (1 - yz)
        t2 = d * np.sqrt((n - 1) * (1 + yz)/(((2 * (n - 1)/(n - 3)) * determin + av * av * cube)))
        p = 1 - t.cdf(abs(t2), n - 2)
        if twotailed:
            p *= 2
        return t2, p
    elif method == 'zou':
        L1 = rz_ci(xy, n, conf_level=conf_level)[0]
        U1 = rz_ci(xy, n, conf_level=conf_level)[1]
        L2 = rz_ci(xz, n, conf_level=conf_level)[0]
        U2 = rz_ci(xz, n, conf_level=conf_level)[1]
        rho_r12_r13 = rho_rxy_rxz(xy, xz, yz)
        lower = xy - xz - pow((pow((xy - L1), 2) + pow((U2 - xz), 2) - 2 * rho_r12_r13 * (xy - L1) * (U2 - xz)), 0.5)
        upper = xy - xz + pow((pow((U1 - xy), 2) + pow((xz - L2), 2) - 2 * rho_r12_r13 * (U1 - xy) * (xz - L2)), 0.5)
        return lower, upper
    else:
        raise Exception('Wrong method!')


def independent_corr(xy, ab, n, n2=None, twotailed=True, conf_level=0.95, method='fisher'):
    """
    Calculates the statistical significance between two independent correlation coefficients
    @param xy: correlation coefficient between x and y
    @param ab: correlation coefficient between a and b
    @param n: number of elements in xy
    @param n2: number of elements in ab (if distinct from n)
    @param twotailed: whether to calculate a one or two tailed test, only works for 'fisher' method
    @param conf_level: confidence level, only works for 'zou' method
    @param method: defines the method used, 'fisher' or 'zou'
    @return: z and p-val
    """
    # default n2 to n up front so both methods handle it consistently
    if n2 is None:
        n2 = n

    if method == 'fisher':
        xy_z = 0.5 * np.log((1 + xy)/(1 - xy))
        ab_z = 0.5 * np.log((1 + ab)/(1 - ab))
        se_diff_r = np.sqrt(1/(n - 3) + 1/(n2 - 3))
        diff = xy_z - ab_z
        z = abs(diff / se_diff_r)
        p = (1 - norm.cdf(z))
        if twotailed:
            p *= 2
        return z, p
    elif method == 'zou':
        L1 = rz_ci(xy, n, conf_level=conf_level)[0]
        U1 = rz_ci(xy, n, conf_level=conf_level)[1]
        L2 = rz_ci(ab, n2, conf_level=conf_level)[0]
        U2 = rz_ci(ab, n2, conf_level=conf_level)[1]
        lower = xy - ab - pow((pow((xy - L1), 2) + pow((U2 - ab), 2)), 0.5)
        upper = xy - ab + pow((pow((U1 - xy), 2) + pow((ab - L2), 2)), 0.5)
        return lower, upper
    else:
        raise Exception('Wrong method!')

# print(dependent_corr(.396, .179, .088, 200, method='steiger'))
# print(independent_corr(.560, .588, 100, 353, method='fisher'))
# print(dependent_corr(.396, .179, .088, 200, method='zou'))
# print(independent_corr(.560, .588, 100, 353, method='zou'))
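The commented-out calls at the bottom of the module double as usage; spelled out with their return values (the numeric inputs are taken verbatim from those comments):

```python
# Steiger's test: compare r(x,y)=.396 against r(x,z)=.179 given r(y,z)=.088, n=200
t2, p = dependent_corr(.396, .179, .088, 200, method='steiger')

# Fisher's z: compare two independent correlations with sample sizes 100 and 353
z, p2 = independent_corr(.560, .588, 100, 353, method='fisher')

print(t2, p, z, p2)
```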
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/storage/sqlite_storage.py
import inspect
import sqlite3
import time
from threading import Lock
from typing import List, Tuple, Any

from fipper import raw
from .storage import Storage
from .. import utils

# language=SQLite
SCHEMA = """
CREATE TABLE sessions
(
    dc_id     INTEGER PRIMARY KEY,
    api_id    INTEGER,
    test_mode INTEGER,
    auth_key  BLOB,
    date      INTEGER NOT NULL,
    user_id   INTEGER,
    is_bot    INTEGER
);

CREATE TABLE peers
(
    id             INTEGER PRIMARY KEY,
    access_hash    INTEGER,
    type           INTEGER NOT NULL,
    username       TEXT,
    phone_number   TEXT,
    last_update_on INTEGER NOT NULL DEFAULT (CAST(STRFTIME('%s', 'now') AS INTEGER))
);

CREATE TABLE version
(
    number INTEGER PRIMARY KEY
);

CREATE INDEX idx_peers_id ON peers (id);
CREATE INDEX idx_peers_username ON peers (username);
CREATE INDEX idx_peers_phone_number ON peers (phone_number);

CREATE TRIGGER trg_peers_last_update_on
    AFTER UPDATE
    ON peers
BEGIN
    UPDATE peers
    SET last_update_on = CAST(STRFTIME('%s', 'now') AS INTEGER)
    WHERE id = NEW.id;
END;
"""


def get_input_peer(peer_id: int, access_hash: int, peer_type: str):
    if peer_type in ["user", "bot"]:
        return raw.types.InputPeerUser(
            user_id=peer_id,
            access_hash=access_hash
        )

    if peer_type == "group":
        return raw.types.InputPeerChat(
            chat_id=-peer_id
        )

    if peer_type in ["channel", "supergroup"]:
        return raw.types.InputPeerChannel(
            channel_id=utils.get_channel_id(peer_id),
            access_hash=access_hash
        )

    raise ValueError(f"Invalid peer type: {peer_type}")


class SQLiteStorage(Storage):
    VERSION = 3
    USERNAME_TTL = 8 * 60 * 60

    def __init__(self, name: str):
        super().__init__(name)

        self.conn = None  # type: sqlite3.Connection
        self.lock = Lock()

    def create(self):
        with self.lock, self.conn:
            self.conn.executescript(SCHEMA)

            self.conn.execute(
                "INSERT INTO version VALUES (?)",
                (self.VERSION,)
            )

            self.conn.execute(
                "INSERT INTO sessions VALUES (?, ?, ?, ?, ?, ?, ?)",
                (2, None, None, None, 0, None, None)
            )

    async def open(self):
        raise NotImplementedError

    async def save(self):
        await self.date(int(time.time()))

        with self.lock:
            self.conn.commit()

    async def close(self):
        with self.lock:
            self.conn.close()

    async def delete(self):
        raise NotImplementedError

    async def update_peers(self, peers: List[Tuple[int, int, str, str, str]]):
        with self.lock:
            self.conn.executemany(
                "REPLACE INTO peers (id, access_hash, type, username, phone_number)"
                "VALUES (?, ?, ?, ?, ?)",
                peers
            )

    async def get_peer_by_id(self, peer_id: int):
        r = self.conn.execute(
            "SELECT id, access_hash, type FROM peers WHERE id = ?",
            (peer_id,)
        ).fetchone()

        if r is None:
            raise KeyError(f"ID not found: {peer_id}")

        return get_input_peer(*r)

    async def get_peer_by_username(self, username: str):
        r = self.conn.execute(
            "SELECT id, access_hash, type, last_update_on FROM peers WHERE username = ? "
            "ORDER BY last_update_on DESC",
            (username,)
        ).fetchone()

        if r is None:
            raise KeyError(f"Username not found: {username}")

        if abs(time.time() - r[3]) > self.USERNAME_TTL:
            raise KeyError(f"Username expired: {username}")

        return get_input_peer(*r[:3])

    async def get_peer_by_phone_number(self, phone_number: str):
        r = self.conn.execute(
            "SELECT id, access_hash, type FROM peers WHERE phone_number = ?",
            (phone_number,)
        ).fetchone()

        if r is None:
            raise KeyError(f"Phone number not found: {phone_number}")

        return get_input_peer(*r)

    def _get(self):
        # the session column to read is the *caller's* function name
        attr = inspect.stack()[2].function

        return self.conn.execute(
            f"SELECT {attr} FROM sessions"
        ).fetchone()[0]

    def _set(self, value: Any):
        attr = inspect.stack()[2].function

        with self.lock, self.conn:
            self.conn.execute(
                f"UPDATE sessions SET {attr} = ?",
                (value,)
            )

    def _accessor(self, value: Any = object):
        return self._get() if value == object else self._set(value)

    async def dc_id(self, value: int = object):
        return self._accessor(value)

    async def api_id(self, value: int = object):
        return self._accessor(value)

    async def test_mode(self, value: bool = object):
        return self._accessor(value)

    async def auth_key(self, value: bytes = object):
        return self._accessor(value)

    async def date(self, value: int = object):
        return self._accessor(value)

    async def user_id(self, value: int = object):
        return self._accessor(value)

    async def is_bot(self, value: bool = object):
        return self._accessor(value)

    def version(self, value: int = object):
        if value == object:
            return self.conn.execute(
                "SELECT number FROM version"
            ).fetchone()[0]
        else:
            with self.lock, self.conn:
                self.conn.execute(
                    "UPDATE version SET number = ?",
                    (value,)
                )
/Friendly-Iter-0.0.1.tar.gz/Friendly-Iter-0.0.1/friendly_iter/iters.py
from multiprocessing import Process, Queue
from queue import Empty  # Empty lives in the stdlib queue module

from .iterator_modifiers import flatten, skip, step, take

DEFAULT_NUMBER_OF_WORKERS = 4


class Iterator:
    """The friendly convenience wrapper over Python iterators."""

    def __init__(self, iterable):
        self.iter = iter(iterable)

    def fork(self, n_jobs=DEFAULT_NUMBER_OF_WORKERS):
        return ParallelIterator(self.iter, n_jobs=n_jobs)

    def map(self, func):
        """Replace each item with the result of func(item)."""
        self.iter = map(func, self.iter)
        return self

    def filter(self, func):
        """Keep only items for which func(item) is true."""
        self.iter = filter(func, self.iter)
        return self

    def flatten(self):
        self.iter = flatten(self.iter)
        return self

    def enumerate(self, start: int = 0):
        self.iter = enumerate(self.iter, start)
        return self  # was missing; all modifiers return self for chaining

    def take(self, n: int):
        """Stop the iterator after the first `n` items."""
        self.iter = take(n, self.iter)
        return self

    def skip(self, n: int):
        """Skip the first `n` items."""
        self.iter = skip(n, self.iter)
        return self

    def step(self, n: int):
        """Yield every `n`th item, starting with the first."""
        self.iter = step(n, self.iter)
        return self

    def __iter__(self):
        return self.iter


class UnorderedIterator:
    """Like `Iterator` but does not make any guarantees about the order of items.

    The main difference to the normal `Iterator` is that this class does not
    expose any methods that depend on the particular order of elements, like
    `enumerate`, `skip`, or `step`.
    """

    def __init__(self, iterable):
        self.iter = iter(iterable)

    def fork(self, n_jobs=DEFAULT_NUMBER_OF_WORKERS):
        return ParallelIterator(self.iter, n_jobs=n_jobs)

    def map(self, func):
        """Replace each item with the result of func(item)."""
        self.iter = map(func, self.iter)
        return self

    def filter(self, func):
        """Keep only items for which func(item) is true."""
        self.iter = filter(func, self.iter)
        return self

    def flatten(self):
        self.iter = flatten(self.iter)
        return self

    def take(self, n: int):
        """Stop the iterator after `n` items.

        Note that these don't have to be the *first* `n` items because the
        unordered iterator does not guarantee any order.
        """
        self.iter = take(n, self.iter)
        return self

    def __iter__(self):
        return self.iter


class ParallelIterator:
    """Like `UnorderedIterator` but distributes work over several parallel processes.

    Does not expose any of the iterator methods that cannot be meaningfully
    parallelized (like `take` for example).
    """

    def __init__(self, iterable, n_jobs=DEFAULT_NUMBER_OF_WORKERS):
        self.n_jobs = n_jobs
        self.input_iter = iter(iterable)
        self.pipeline = IteratorPipeline()

    def map(self, func):
        """Replace each item with the result of func(item)."""
        self.pipeline.add_transform(map, func)
        return self

    def filter(self, func):
        """Keep only items for which func(item) is true."""
        self.pipeline.add_transform(filter, func)
        return self

    def flatten(self):
        self.pipeline.add_transform(flatten)
        return self

    def join(self):
        wc = WorkCoordinator(n_workers=self.n_jobs)
        output_iter = wc.run(self.input_iter, self.pipeline)
        return UnorderedIterator(output_iter)


class WorkCoordinator:

    def __init__(self, n_workers, input_buffer_size=None, result_poll_interval=0.1):
        self.distributor = Queue()
        self.collector = Queue()
        self.n_workers = n_workers
        self.input_buffer_size = input_buffer_size or n_workers * 2
        self.poll_interval = result_poll_interval
        self.workers = []
        self.active_workers = 0

    def run(self, input_iter, pipeline):
        self.start_workers(pipeline)
        yield from self.balance_inputs_and_outputs(input_iter)
        self.stop_workers()
        yield from self.remaining_results()

    def start_workers(self, pipeline):
        self.active_workers = self.n_workers
        self.workers = [Process(target=worker, args=(pipeline, self.distributor, self.collector, i))
                        for i in range(self.n_workers)]
        for w in self.workers:
            w.start()

    def stop_workers(self):
        for _ in self.workers:
            self.distributor.put(STOP_WORKER)
        for w in self.workers:
            w.join()

    def balance_inputs_and_outputs(self, input_iter):
        for x in input_iter:
            while self.input_queue_is_full():
                yield from self.try_get_result()
            self.distributor.put(x)

    def input_queue_is_full(self):
        return self.distributor.qsize() >= self.input_buffer_size

    def try_get_result(self):
        try:
            y = self.collector.get(timeout=self.poll_interval)
            yield y
        except Empty:
            pass

    def remaining_results(self):
        while self.active_workers > 0:
            y = self.collector.get()
            if y == DONE_WORKER:
                self.active_workers -= 1
            else:
                yield y


def worker(pipeline, distributor, collector, i):
    def get():
        while True:
            x = distributor.get()
            if x == STOP_WORKER:
                return
            yield x

    for x in pipeline.apply(get()):
        collector.put(x)
    collector.put(DONE_WORKER)


STOP_WORKER = b'STOP'
DONE_WORKER = b'DONE'


class IteratorPipeline:

    def __init__(self):
        self.transformers = []

    def add_transform(self, func, *args):
        self.transformers.append((func, args))

    def apply(self, input_iter):
        it = input_iter
        for func, args in self.transformers:
            it = func(*args, it)
        return it
/fangnao-0.1.0.tar.gz/FangNao-0.1.0/docs/apis/core.rst
npbrain.core package
====================

.. currentmodule:: npbrain.core
.. automodule:: npbrain.core

.. contents::
    :local:
    :depth: 2

Numerical integration methods
-----------------------------

The most commonly used function is `integrate`:

.. autosummary::
    :toctree: _autosummary

    integrate

Methods for ordinary differential equations.

.. autosummary::
    :toctree: _autosummary

    ode_euler
    ode_rk2
    midpoint
    ode_heun
    ode_rk3
    ode_rk4
    ode_rk4_alternative
    ode_backward_euler
    trapezoidal_rule
    ode_exponential_euler

Methods for stochastic differential equations.

.. autosummary::
    :toctree: _autosummary

    sde_euler
    Milstein_dfree_Ito
    sde_heun
    Milstein_dfree_Stra
    sde_exponential_euler

Neurons
-------

.. autosummary::
    :toctree: _autosummary

    judge_spike
    initial_neu_state
    format_geometry
    format_refractory
    generate_fake_neuron

.. autoclass:: Neurons
    :members:

Synapses
--------

.. autosummary::
    :toctree: _autosummary

    format_delay
    initial_syn_state

.. autoclass:: Synapses
    :members:

Monitors
--------

.. autoclass:: Monitor
    :members:

.. autoclass:: SpikeMonitor
    :members:

.. autoclass:: StateMonitor
    :members:

.. autosummary::
    :toctree: _autosummary

    raster_plot
    firing_rate

Network
-------

.. autoclass:: Network
    :members: add, run, run_time
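The ODE method names listed above map to standard fixed-step schemes. As an illustration of what `ode_euler` computes (a generic forward Euler step, not npbrain's actual API, whose signature is not shown in this document):

```python
import math

def ode_euler_step(f, v, t, dt):
    # forward Euler: v_{n+1} = v_n + dt * f(v_n, t_n)
    return v + dt * f(v, t)

# illustrative use: exponential decay dv/dt = -v, integrated to t = 1
v, t, dt = 1.0, 0.0, 0.01
for _ in range(100):
    v = ode_euler_step(lambda v, t: -v, v, t, dt)
    t += dt
print(v, math.exp(-1))  # the two values should be close
```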
/HiCAssembler-1.1.1.tar.gz/HiCAssembler-1.1.1/hicassembler/HiCAssembler.py
import numpy as np
import networkx as nx
import time
import os.path
import copy
import sys

import hicexplorer.HiCMatrix as HiCMatrix
import hicexplorer.hicMergeMatrixBins
import hicexplorer.hicFindTADs as hicFindTADs
from functools import wraps
from hicassembler.Scaffolds import Scaffolds

import logging
log = logging.getLogger("HiCAssembler")
log.setLevel(logging.DEBUG)

POWER_LAW_DECAY = 2**(-1.08)  # expected exponential decay at 2*distance

MIN_LENGTH = 400000  # minimum contig or PE_scaffold length to consider
ZSCORE_THRESHOLD = -1  # zscore threshold to declare a boundary a misassembly
MIN_MAD = -0.5  # minimum zscore row contacts to filter low scoring bins
MAX_MAD = 50  # maximum zscore row contacts


def timeit(fn):
    @wraps(fn)
    def with_profiling(*args, **kwargs):
        start_time = time.time()
        ret = fn(*args, **kwargs)
        elapsed_time = time.time() - start_time
        log.info("{} took {}".format(fn.__name__, elapsed_time))
        return ret

    return with_profiling


class HiCAssembler:
    def __init__(self, hic_file_name, fasta_file, out_folder,
                 min_mad=MIN_MAD, max_mad=MAX_MAD, split_misassemblies=True,
                 split_positions_file=None, min_scaffold_length=MIN_LENGTH,
                 matrix_bin_size=25000, use_log=False, num_processors=5,
                 misassembly_zscore_threshold=ZSCORE_THRESHOLD,
                 num_iterations=2, scaffolds_to_ignore=None):
        """
        Prepares a hic matrix for assembly.
        It is expected that initial contigs or scaffolds contain bins
        of restriction fragment size.

        Parameters
        ----------
        hic_file_name : hic file name or a HiCMatrix object
        min_mad : minimum MAD score value per bin
        max_mad : maximum MAD score value per bin

        Returns
        -------

        """
        # The list is modified with each iteration replacing its members
        # by lists. After two iterations a scaffold
        # list could look like: [[0], [1, 2, 3]]
        # which means that there are two scaffolds,
        # one of which is composed of the contigs 1, 2 and 3.

        # replace the diagonal from the matrix by zeros
        # hic.diagflat(0)
        self.fasta_file = fasta_file
        self.out_folder = out_folder
        self.min_mad = min_mad
        self.max_mad = max_mad
        self.min_scaffold_length = min_scaffold_length
        self.num_processors = num_processors
        self.misassembly_threshold = misassembly_zscore_threshold
        self.merged_paths = None
        self.num_iterations = num_iterations
        self.iteration = 0

        if not isinstance(hic_file_name, str):
            # assume that the hic given is already a HiCMatrix object;
            # this is normally used for testing
            self.hic = hic_file_name
        else:
            log.info("Loading Hi-C matrix ... ")
            # check if a lower resolution matrix is available
            self.load_hic_matrix(hic_file_name, split_misassemblies,
                                 split_positions_file, matrix_bin_size)

        if use_log:
            self.hic.matrix.data = np.log1p(self.hic.matrix.data)

        # build scaffolds graph. Bins on the same contig are
        # put together into a path (a type of graph with max degree = 2)
        self.scaffolds_graph = Scaffolds(copy.deepcopy(self.hic), self.out_folder)

        if scaffolds_to_ignore is not None:
            for scaffold in scaffolds_to_ignore:
                log.info("Removing scaffold {} from assembly".format(scaffold))
                if scaffold in self.scaffolds_graph.scaffold.node:
                    self.scaffolds_graph._remove_bin_path(self.scaffolds_graph.scaffold.node[scaffold]['path'],
                                                          split_scaffolds=True)
                else:
                    log.warn("Scaffold {} is not part of the assembly".format(scaffold))

        self.plot_matrix(self.out_folder + "/before_assembly.pdf",
                         title="After split mis-assemblies assembly", add_vlines=True)
        mat_size = self.hic.matrix.shape[:]
        # remove contigs that are too small
        self.scaffolds_graph.remove_small_paths(self.min_scaffold_length, split_scaffolds=True)
        assert mat_size == self.scaffolds_graph.hic.matrix.shape
        self.N50 = []

    def load_hic_matrix(self, hic_file_name, split_misassemblies, split_positions_file, matrix_bin_size):
        """
        Checks if an already processed matrix is present and loads it. If not, the high
        resolution matrix is loaded, the misassemblies are split and the lower
        resolution matrix is saved.

        Parameters
        ----------
        hic_file_name : name of a hic file or a HiCMatrix object
        split_misassemblies : bool. If True, the TAD calling algorithm is used to identify misassemblies
        split_positions_file : file containing manual split positions in bed format
        matrix_bin_size : bin size of matrix

        Returns
        -------

        """
        merged_bins_matrix_file = self.out_folder + "/hic_merged_bins_matrix.h5"
        if os.path.isfile(merged_bins_matrix_file):
            log.info("Found reduced matrix file {}".format(merged_bins_matrix_file))
            self.hic = HiCMatrix.hiCMatrix(merged_bins_matrix_file)
        else:
            self.hic = HiCMatrix.hiCMatrix(hic_file_name)

            if split_misassemblies:
                # try to find contigs that probably should be separated
                self.split_misassemblies(hic_file_name, split_positions_file)

            log.info("Merging bins of file to reduce resolution")
            binsize = self.hic.getBinSize()
            if binsize < matrix_bin_size:
                # make a smaller matrix having bins of around 25,000 bp
                num_bins = matrix_bin_size / binsize

                log.info("Reducing matrix size to {:,} bp (number of bins merged: {})".format(binsize, num_bins))
                self.hic = HiCAssembler.merge_bins(self.hic, num_bins)

            self.hic.save(merged_bins_matrix_file)
            self.hic = HiCMatrix.hiCMatrix(merged_bins_matrix_file)

    def assemble_contigs(self):
        """

        Returns
        -------

        """
        log.debug("Size of matrix is {}".format(self.scaffolds_graph.hic.matrix.shape[0]))
        for iteration in range(self.num_iterations):
            self.iteration = iteration
            self.scaffolds_graph.iteration = iteration
            n50 = self.scaffolds_graph.compute_N50()
            self.scaffolds_graph.get_paths_stats()

            log.debug("iteration: {}\tN50: {:,}".format(iteration, n50))
            self.N50.append(n50)

            # the first iteration is more stringent
            if iteration < 3:
                target_size = int(min(2e6, self.scaffolds_graph.paths_min * (iteration + 1)))
                log.debug("Merging small bins in larger bins of size {} bp".format(target_size))
                self.scaffolds_graph.split_and_merge_contigs(num_splits=3, target_size=target_size,
                                                             normalize_method='ice')
                stats = self.scaffolds_graph.get_stats_per_split()
                try:
                    # stats[2] contains the mean, median, max, min and len (number of samples)
                    # for bins whose start position is about the distance of two
                    # bins, or in other words that are separated by one bin
                    conf_score = stats[2]['median'] * 0.9
                # if the scaffolds are all very small, get_stats_per_split
                # may not have enough information to compute, thus a second
                # method to identify the confidence score is used
                except KeyError:
                    conf_score = np.percentile(self.scaffolds_graph.matrix.data, 5)
                log.debug("Confidence score set to {}".format(conf_score))
            else:
                # self.scaffolds_graph.split_and_merge_contigs(num_splits=1, target_size=int(1e6),
                #                                              normalize_method='ice')
                self.scaffolds_graph.split_and_merge_contigs(num_splits=1, normalize_method='ice')
                conf_score = np.percentile(self.scaffolds_graph.matrix.data, 30)
                log.info("Confidence score set to: {}".format(conf_score))

            self.scaffolds_graph.join_paths_max_span_tree(conf_score, node_degree_threshold=2e3,
                                                          hub_solving_method='remove weakest')

            if iteration == 0:
                self.scaffolds_graph.remove_small_paths(self.min_scaffold_length, split_scaffolds=True)

            self.plot_matrix(self.out_folder + "/after_assembly_{}.pdf".format(iteration),
                             title="Assembly iteration {}".format(iteration), add_vlines=True)

        before_assembly_length, before_num_paths = self.scaffolds_graph.get_assembly_length()

        self.put_back_small_scaffolds()
        after_assembly_length, after_num_paths = self.scaffolds_graph.get_assembly_length()
        diff = after_assembly_length - before_assembly_length
        log.info('{:,} bp ({:.2%}) were added back to the assembly'.
                 format(diff, float(diff) / self.scaffolds_graph.total_length))
        log.info('Total assembly length: {:,} bp ({:.2%})'.
                 format(after_assembly_length, float(after_assembly_length) / self.scaffolds_graph.total_length))

        self.plot_matrix(self.out_folder + "/after_put_scaff_back.pdf",
                         title="After assembly", add_vlines=True)

        hic = self.reorder_matrix(max_num_bins=int(1e6), rename_scaffolds=True)
        hic.save(self.out_folder + "/final_matrix.h5")
        print self.N50
        return self.get_contig_order()

    def make_scaffold_network(self, orig_scaff, confidence_score=None):
        """

        Parameters
        ----------
        orig_scaff
        confidence_score : minimum value in the matrix

        Returns
        -------

        Examples
        --------
        >>> import tempfile
        >>> dirpath = tempfile.mkdtemp(prefix="hicassembler_test_")
        >>> from hicassembler.Scaffolds import get_test_matrix as get_test_matrix
        >>> cut_intervals = [('c-0', 0, 10, 1), ('c-0', 10, 30, 2), ('c-1', 0, 10, 1),
        ... ('c-1', 10, 20, 1), ('c-2', 0, 10, 1), ('c-2', 10, 30, 1)]
        >>> hic = get_test_matrix(cut_intervals=cut_intervals)
        >>> H = HiCAssembler(hic, "", dirpath, split_misassemblies=False,
        ... min_scaffold_length=20, use_log=False)

        >>> H.scaffolds_graph.split_and_merge_contigs(num_splits=1, normalize_method='none')
        >>> H.scaffolds_graph.add_edge(0, 1)
        >>> list(H.scaffolds_graph.matrix_bins.get_all_paths())
        [[0, 1, 5, 4]]

        >>> orig_scaff = Scaffolds(H.hic)
        >>> orig_scaff.split_and_merge_contigs(num_splits=1, normalize_method='none')

        In orig_scaff only the scaffold paths are represented. It contains all
        scaffolds, including ones deleted in self.scaffold_graph.
        No connections between scaffolds are present.

        >>> list(orig_scaff.matrix_bins.get_all_paths())
        [[0, 1], [2, 3], [4, 5]]

        >>> G = H.make_scaffold_network(orig_scaff)
        >>> list(G.edges(data=True))
        [('c-2', 'c-1', {'weight': 16.0}), ('c-2', 'c-0', {'weight': 42.0}), \
('c-1', 'c-0', {'weight': 28.0})]

        >>> import shutil
        >>> shutil.rmtree(dirpath)
        """
        nxG = nx.Graph()
        for node_id, node in orig_scaff.scaffold.node.iteritems():
            nn = node.copy()
            for attr, value in nn.iteritems():
                if isinstance(value, np.int64):
                    nn[attr] = int(value)
                elif isinstance(value, np.float64):
                    nn[attr] = float(value)
                elif isinstance(value, list):
                    nn[attr] = ", ".join([str(x) for x in value])
                elif isinstance(value, np.string_):
                    nn[attr] = str(value)
            if node_id in self.scaffolds_graph.scaffold.node:
                nn['is_backbone'] = 1
            nxG.add_node(node_id, **nn)

        matrix = orig_scaff.matrix.tocoo()
        matrix.setdiag(0)
        max_weight = float(orig_scaff.matrix.max() * 1.5)
        for u, v, weight in zip(matrix.row, matrix.col, matrix.data):
            if u == v:
                continue
            if weight < confidence_score:
                continue

            scaff_u = orig_scaff.pg_base.node[u]['name']
            scaff_v = orig_scaff.pg_base.node[v]['name']
            nxG.add_edge(scaff_u, scaff_v, weight=float(weight))
            if scaff_u in self.scaffolds_graph.scaffold.node and \
               scaff_v in self.scaffolds_graph.scaffold.node and \
               scaff_u in self.scaffolds_graph.scaffold.adj[scaff_v]:
                # u and v are directly joined
                nxG.add_edge(scaff_u, scaff_v, weight=float(max_weight))

        # add all contacts between assembled nodes that may not have been
        # present in the graph
        for path in self.scaffolds_graph.scaffold.get_all_paths():
            for scaff_u, scaff_v in zip(path[:-1], path[1:]):
                nxG.add_edge(scaff_u, scaff_v, weight=float(max_weight))

        return nxG

    @staticmethod
    def _remove_weakest(G, exclude=[]):
        """
        Based on the maximum spanning tree graph, hubs are resolved by removing
        the weakest links until only two edges are left.

        For a maximum spanning tree like this:

        o---o---o---o---o---o--o
                 \
                  --o--o

        The algorithm works as follows:

        1. Sort the node degree in decreasing order
        2. For each node with degree > 2 leave only the two edges with the highest weight

        Parameters
        ----------
        G : maximum spanning tree networkx graph
        exclude : list of nodes to exclude from removing links.

        Returns
        -------
        G
        """
        node_degree_mst = dict(G.degree(G.node.keys()))
        for node, degree in sorted(node_degree_mst.iteritems(), key=lambda (k, v): v, reverse=True):
            if degree > 2 and node not in exclude:
                adj = sorted(G.adj[node].iteritems(), key=lambda (k, v): v['weight'])
                # remove the weakest edges but only if either of the nodes is not a hub
                for adj_node, attr in adj[:-2]:
                    log.debug("Removing weak edge {}-{} weight: {}".format(node, adj_node, attr['weight']))
                    G.remove_edge(node, adj_node)
            if degree <= 2:
                break
        return G

    @staticmethod
    def _find_backbone_node(graph):
        """
        Given a networkx graph, identifies the node (or nodes) labeled as backbone.
        This function is called as part of put_back_small_scaffolds.

        Parameters
        ----------
        graph : networkX graph.

        Returns
        -------
        set of backbone nodes
        """
        backbones = set()
        for node_id, attr in graph.node.iteritems():
            if 'is_backbone' in attr:
                backbones.add(node_id)

        return backbones

    @staticmethod
    def _get_subgraph_containing_node(graph, target_node):
        """
        Returns the subgraph of `graph` containing the given node

        Parameters
        ----------
        graph : NetworkX graph
        target_node : node id

        Returns
        -------
        NetworkX graph or None if the node is not in the graph
        """
        for sub_graph in nx.connected_component_subgraphs(graph):
            if target_node in sub_graph:
                return sub_graph
        return None

    @staticmethod
    def _get_paths_from_backbone(graph, backbone_node):
        """
        Returns all paths that contain the backbone

        The graph used should not contain nodes with degree > 2
        except for the backbone node:

        eg.
            o--*--o--o
               |
               o

        but not:
            o--o--*--o
                  |
                  o

        Parameters
        ----------
        graph : NetworkX graph
        backbone_node : node id

        Returns
        -------
        path list, where each path has the backbone as the first element

        Examples
        --------
        >>> G = nx.Graph()
        >>> G.add_edge('backbone', 1, weight=10)
        >>> G.add_edge(1, 2, weight=5)
        >>> G.add_edge(2, 3, weight=6)
        >>> G.add_edge('backbone', 4, weight=5)
        >>> G.add_edge(4, 5, weight=10)
        >>> HiCAssembler._get_paths_from_backbone(G, 'backbone')
        [['backbone', 1, 2, 3], ['backbone', 4, 5]]
        """
        # get backbone_id neighbors
        path_list = []
        seen = set([backbone_node])
        for adj, weight in sorted(graph.adj[backbone_node].iteritems(), key=lambda (k, v): v['weight'])[::-1]:
            path = [backbone_node]
            while True:
                path.append(adj)
                seen.add(adj)
                adj_list = [x for x in graph.adj[adj].keys() if x not in seen]
                if len(adj_list) == 0:
                    break
                adj = adj_list[0]
            path_list.append(path)

        return path_list

    def put_back_small_scaffolds(self, normalize_method='ice'):
        """
        Identifies scaffolds that were removed from the Hi-C assembly and
        tries to find their correct location.

        Returns
        -------

        Examples
        --------
        >>> from hicassembler.Scaffolds import get_test_matrix as get_test_matrix
        >>> cut_intervals = [('c-0', 0, 10, 1), ('c-0', 10, 30, 2), ('c-1', 0, 10, 1),
        ... ('c-1', 10, 20, 1), ('c-2', 0, 10, 1), ('c-2', 10, 30, 1)]
        >>> hic = get_test_matrix(cut_intervals=cut_intervals)
        >>> import tempfile
        >>> dirpath = tempfile.mkdtemp()
        >>> H = HiCAssembler(hic, "", dirpath, split_misassemblies=False, min_scaffold_length=20, use_log=False)
        >>> H.scaffolds_graph.split_and_merge_contigs(num_splits=1, normalize_method='none')
        >>> H.scaffolds_graph.add_edge(0, 1)
        >>> list(H.scaffolds_graph.matrix_bins.get_all_paths())
        [[0, 1, 5, 4]]
        >>> H.put_back_small_scaffolds()
        >>> list(H.scaffolds_graph.matrix_bins.get_all_paths())
        [[0, 1, 2, 3, 5, 4]]
        >>> import shutil
        >>> shutil.rmtree(dirpath)

        # larger test
        >>> from hicassembler.Scaffolds import get_test_matrix as get_test_matrix
        >>> cut_intervals = [('c-0', 0, 20, 1), ('c-0', 20, 40, 2),
        ... ('c-1', 10, 20, 1), ('c-1', 20, 30, 1),
        ... ('c-2', 0, 10, 1), ('c-2', 10, 20, 1),
        ... ('c-3', 0, 10, 1), ('c-3', 10, 20, 1),
        ... ('c-4', 0, 20, 1), ('c-4', 20, 40, 1)]
        >>> from scipy.sparse import csr_matrix
        >>> A = csr_matrix(np.array(
        ... [[50, 19,  9,  8,  5,  3,  2,  1,  0,  0],
        ...  [ 0, 50, 20,  9,  8,  5,  3,  2,  1,  0],
        ...  [ 0,  0, 50, 19,  9,  8,  5,  3,  2,  1],
        ...  [ 0,  0,  0, 50, 19,  9,  8,  5,  3,  2],
        ...  [ 0,  0,  0,  0, 50, 19,  9,  8,  5,  3],
        ...  [ 0,  0,  0,  0,  0, 50, 19,  9,  8,  5],
        ...  [ 0,  0,  0,  0,  0,  0, 50, 19,  9,  8],
        ...  [ 0,  0,  0,  0,  0,  0,  0, 50, 19,  9],
        ...  [ 0,  0,  0,  0,  0,  0,  0,  0, 50, 19],
        ...  [ 0,  0,  0,  0,  0,  0,  0,  0,  0, 50]]))
        >>> hic = get_test_matrix(cut_intervals=cut_intervals, matrix=A)

        # scramble matrix
        # the scrambled order is [c-3 (inv), c-1 (inv), c-0, c-4, c-2]
        >>> scrambled_bins = [7,6, 3,2, 0,1, 8,9, 5,4]
        >>> matrix = hic.matrix[scrambled_bins, :][:, scrambled_bins]

        # the intervals are shuffled but not the direction
        >>> scrambled_intervals = [6,7, 2,3, 0,1, 8,9, 5,4]
        >>> cut_intervals = [cut_intervals[x] for x in scrambled_intervals]
        >>> hic.setMatrix(matrix, cut_intervals)
        >>> hic.matrix.todense()
        matrix([[100,  19,   5,   3,   1,   2,  19,   9,   9,   8],
                [ 19, 100,   8,   5,   2,   3,   9,   8,  19,   9],
                [  5,   8, 100,  19,   8,   9,   3,   2,   9,  19],
                [  3,   5,  19, 100,   9,  20,   2,   1,   8,   9],
                [  1,   2,   8,   9, 100,  19,   0,   0,   3,   5],
                [  2,   3,   9,  20,  19, 100,   1,   0,   5,   8],
                [ 19,   9,   3,   2,   0,   1, 100,  19,   8,   5],
                [  9,   8,   2,   1,   0,   0,  19, 100,   5,   3],
                [  9,  19,   9,   8,   3,   5,   8,   5, 100,  19],
                [  8,   9,  19,   9,   5,   8,   5,   3,  19, 100]])
        >>> dirpath = tempfile.mkdtemp()
        >>> H = HiCAssembler(hic, "", dirpath, split_misassemblies=False, min_scaffold_length=20, use_log=False)
        >>> H.scaffolds_graph.split_and_merge_contigs(num_splits=1, normalize_method='none')

        the shorter scaffolds are removed: c-1, c-2, and c-3.

        add edge between remaining scaffolds
        >>> H.scaffolds_graph.add_edge(0, 1)

        After adding the edge the network looks like

            c-0  c-4
             o---o

        c-1, c-2, c-3 removed

        >>> list(H.scaffolds_graph.scaffold.get_all_paths())
        [['c-0', 'c-4']]
        >>> H.put_back_small_scaffolds(normalize_method='none')

        The intermediate maximum spanning tree that is created looks like:

             c-0  c-4
              o===o
             /     \
        c-1 o       o c-3
             \
              o
             c-2

        (==) denotes a `backbone` edge, i.e., an edge that was established
        using the larger scaffolds.

        In the algorithm, the backbone edges are removed as well as all backbone
        nodes not attached to a removed scaffold. After this step, the previous
        network now looks like:

             c-0  c-4
              o    o
             /      \
        c-1 o        o c-3
             \
              o
             c-2

        Next, each branch is considered independently (e.g. [c-0, c-1]).
        The orientation of the scaffolds is determined pairwise using the
        find_best_permutation method on the scaffold path. E.g. for c-0 the
        matrix nodes path is [4, 5]. Once the orientation is known, the edge
        of the scaffold backbone is deleted to allow the insertion of the
        branch at that position.

        >>> list(H.scaffolds_graph.scaffold.get_all_paths())
        [['c-0', 'c-1', 'c-2', 'c-3', 'c-4']]

        # The resulting matrix should be ordered
        >>> hic = H.reorder_matrix()
        >>> hic.matrix.todense()
        matrix([[100,  19,   9,   8,   5,   3,   2,   1,   0,   0],
                [ 19, 100,  20,   9,   8,   5,   3,   2,   1,   0],
                [  9,  20, 100,  19,   9,   8,   5,   3,   2,   1],
                [  8,   9,  19, 100,  19,   9,   8,   5,   3,   2],
                [  5,   8,   9,  19, 100,  19,   9,   8,   5,   3],
                [  3,   5,   8,   9,  19, 100,  19,   9,   8,   5],
                [  2,   3,   5,   8,   9,  19, 100,  19,   9,   8],
                [  1,   2,   3,   5,   8,   9,  19, 100,  19,   9],
                [  0,   1,   2,   3,   5,   8,   9,  19, 100,  19],
                [  0,   0,   1,   2,   3,   5,   8,   9,  19, 100]])

        >>> shutil.rmtree(dirpath)
        """
        log.info("Total assembly length before adding scaffolds back: {:,}".
                 format(self.scaffolds_graph.get_assembly_length()[0]))

        # create orig_scaff once using min_scaffold_length as size target to
        # compute confidence scores
        orig_scaff = Scaffolds(self.hic)
        orig_scaff.split_and_merge_contigs(num_splits=1, target_size=self.min_scaffold_length,
                                           normalize_method=normalize_method)
        orig_stats = orig_scaff.get_stats_per_split()
        conf_score = orig_stats[1]['median']

        # remake orig_scaff a second time without splitting the scaffolds
        # as this is the structure needed for the rest of the program
        orig_scaff = Scaffolds(self.hic)
        orig_scaff.split_and_merge_contigs(num_splits=1, normalize_method=normalize_method)

        # reset pg_base
        self.scaffolds_graph.pg_base = copy.deepcopy(self.scaffolds_graph.matrix_bins)

        nxG = self.make_scaffold_network(orig_scaff, confidence_score=conf_score)
        nxG = nx.maximum_spanning_tree(nxG, weight='weight')
        nx.write_graphml(nxG, self.out_folder + "/mst_for_small_Scaff_integration.graphml")

        # 1. Identify branches

        # delete backbone nodes that are not adjacent to a removed scaffold (removed scaffolds are those
        # small contigs/scaffolds removed at the beginning that are stored in
        # self.scaffolds_graph.removed_scaffolds).
        # Basically, all so-called backbone nodes that are not connected to the scaffolds
        # that we want to put back are deleted.
        for node_id in self.scaffolds_graph.scaffold.node.keys():
            # check that the backbone node is not adjacent to a removed node.
            if len(set(nxG.adj[node_id].keys()).intersection(self.scaffolds_graph.removed_scaffolds.node.keys())) == 0:
                nxG.remove_node(node_id)

        # remove backbone edges. That is, if there is some edge between two backbone nodes,
        # this is removed (see example in the docstring).
        for u, v in list(nxG.edges()):
            if 'is_backbone' in nxG.node[u] and 'is_backbone' in nxG.node[v]:
                nxG.remove_edge(u, v)

        nx.write_graphml(nxG, "{}/backbone_put_back_scaffolds.graphml".format(self.out_folder))

        # now each connected component should only have a backbone node
        # and all the connected scaffolds that belong to that node.
        for branch in list(nx.connected_component_subgraphs(nxG)):
            branch_len = sum([branch.node[x]['length'] for x in branch])
            branch_nodes = [x for x in branch]
            log.debug("Checking branch for insertion in assembly.\nLength:{}\nScaffolds:{}".
                      format(branch_len, branch_nodes))

            if len(branch) > 20:
                log.info("Skipping the insertion of a branch that is too long. "
                         "The length of the branch is: {} (threshold is 20)".format(len(branch)))
                continue

            # after removing the hubs the branch may contain several connected components. Only the
            # component that contains a backbone node is used.
            backbone_list = HiCAssembler._find_backbone_node(branch)

            if len(backbone_list) == 0:
                if len(branch_nodes) == 1:
                    continue
                # this is a branch without a backbone and is inserted as a separate
                # hic-scaffold
                branch = HiCAssembler._remove_weakest(branch)
                path = Scaffolds._return_paths_from_graph(branch)[0]
                for scaff_name in path:
                    # restore all scaffolds in the path
                    self.scaffolds_graph.restore_scaffold(scaff_name)

                # get the matrix bin paths for each scaffold
                bins_path = [self.scaffolds_graph.scaffold.node[x]['path'] for x in path]

                # a. find the best orientation of the scaffold paths with respect to each other;
                # the best path contains the bins_path in the best computed orientations
                best_path = Scaffolds.find_best_permutation(orig_scaff.hic.matrix, bins_path,
                                                            only_expand_but_not_permute=True)

                # b. add edges in the path
                for path_u, path_v in zip(best_path[:-1], best_path[1:]):
                    self.scaffolds_graph.add_edge_matrix_bins(path_u[-1], path_v[0])

                log.debug("No backbone found for branch with nodes: {}".format(branch.node.keys()))
                log.info("Scaffolds without a backbone node were added: {}".format(path))
                continue

            # each branch should contain at most two backbone nodes
            if len(backbone_list) > 2:
                log.info("Branch contains more than two backbones. Backbones in branch: {}".format(backbone_list))
                log.info("Skipping this branch of length: {}".format(len(branch)))
                continue

            # if the branch contains two backbone nodes and is a path,
            # that means that the path is connecting two different
            # hic-scaffolds. The solution is to break the path at the
            # weakest link instead of letting the path join the
            # two different hic-scaffolds
            elif len(backbone_list) == 2:
                # check if the branch forms a path with the backbones at the two ends
                path = HiCAssembler._get_paths_from_backbone(branch, list(backbone_list)[0])
                if len(path) > 1:
                    continue
                path = path[0]
                if backbone_list.intersection([path[0], path[-1]]) != backbone_list:
                    # the two backbones are not on the sides of the path.
                    continue

                # check if the backbone nodes are adjacent in
                # the hic-scaffolds. That means the removed path
                # should be inserted between them.
                if path[0] in self.scaffolds_graph.scaffold.adj[path[-1]]:
                    # remove one of the backbones of the graph and continue
                    log.debug("Removing one backbone scaffold from branch with two backbones")
                    branch.remove_node(path[-1])
                    self.insert_path(path[:-1], orig_scaff)
                    continue
                else:
                    # the backbones belong to different hic-scaffolds;
                    # the path is split by the weakest edge.
                    min_weight = np.Inf
                    for u, v, attr in branch.edges(data=True):
                        if attr['weight'] < min_weight:
                            min_edge = (u, v)
                            min_weight = attr['weight']

                    log.debug("Removing weak edge in path connecting two hic-scaffolds: "
                              "edge: {}, weight: {}".format(min_edge, min_weight))
                    idx_u = path.index(min_edge[0])
                    idx_v = path.index(min_edge[1])
                    if idx_u > idx_v:
                        idx_u, idx_v = idx_v, idx_u
                    assert idx_u + 1 == idx_v
                    path_a = path[:idx_v]
                    path_b = path[idx_v:]
                    if len(path_a) > 0:
                        self.insert_path(path_a, orig_scaff)
                    if len(path_b) > 0:
                        # path b must be inverted such that path[0]
                        # corresponds to the backbone node
                        self.insert_path(path_b[::-1], orig_scaff)
                    continue
            else:
                backbone_node = list(backbone_list)[0]

                # At this point a branch may look like this
                #
                #              o
                #             /
                #    o--*--o--o
                #        \
                #         o--o--o
                #          \
                #           o
                #
                # where `*` is the backbone node.
                branch = HiCAssembler._remove_weakest(branch, exclude=[backbone_node])

                # after removing the weakest edges parts of the graph are no longer connected
                # to the backbone, thus the subgraph containing the backbone is selected
                branch = HiCAssembler._get_subgraph_containing_node(branch, backbone_node)
                if branch is None:
                    log.debug("Graph is empty")
                    continue

                for path in HiCAssembler._get_paths_from_backbone(branch, backbone_node):
                    self.insert_path(path, orig_scaff)

        log.info("Total assembly length after adding scaffolds back: {:,}".
                 format(self.scaffolds_graph.get_assembly_length()[0]))
        return

    def insert_path(self, path, orig_scaff):
        """

        Parameters
        ----------
        path

        Returns
        -------

        """
        # in path, path[0] is always the backbone node.
        for scaff_name in path[1:]:
            # restore all scaffolds except for path[0] which is the backbone node (and was not removed)
            self.scaffolds_graph.restore_scaffold(scaff_name)

        # get the matrix bin paths for each scaffold
        bins_path = [self.scaffolds_graph.scaffold.node[x]['path'] for x in path]

        # a. find the best orientation of the scaffold paths with respect to each other;
        # the best path contains the bins_path in the best computed orientations.
        # best_path[0] is the backbone scaffold path
        best_path = Scaffolds.find_best_permutation(orig_scaff.hic.matrix, bins_path,
                                                    only_expand_but_not_permute=True)

        # the backbone bin id that should be joined with the removed scaffold
        # corresponds to the last bin_id in the first best_path, which is the backbone path
        backbone_bin = best_path[0][-1]

        # identify the neighbor bin of the backbone scaffold in the adjacent scaffold (if any).
        # To insert the removed scaffolds an edge in the assembled scaffolds has to be removed.
        # This edge is the edge containing the backbone_bin.
        adjacent_backbone_bin = None
        for adj in self.scaffolds_graph.matrix_bins.adj[backbone_bin].keys():
            if self.scaffolds_graph.matrix_bins.node[adj]['name'] != \
                    self.scaffolds_graph.matrix_bins.node[backbone_bin]['name']:
                adjacent_backbone_bin = adj
                break

        if adjacent_backbone_bin is not None:
            # delete edge between backbone and adjacent scaffold
            self.scaffolds_graph.delete_edge_from_matrix_bins(backbone_bin, adjacent_backbone_bin)
            # add edge between last bin in best path and adjacent scaffold
            self.scaffolds_graph.add_edge_matrix_bins(best_path[-1][-1], adjacent_backbone_bin)

        # b. add the other edges in the path
        for path_u, path_v in zip(best_path[:-1], best_path[1:]):
            self.scaffolds_graph.add_edge_matrix_bins(path_u[-1], path_v[0])

        integrated_paths = [(x, self.scaffolds_graph.scaffold.node[x]['length']) for x in path[1:]]
        log.info("Scaffolds {} successfully integrated into the network".format(integrated_paths))

    def split_misassemblies(self, hic_file_name, split_positions_file=None):
        """
        Misassemblies are commonly found in the data. To remove them,
        a simple metric is used to identify empty contacts.

        Parameters
        ----------
        hic_file_name : name of the file

        Returns
        -------

        """
        log.info("Detecting misassemblies")

        tad_score_file = self.out_folder + "/misassembly_score.txt"
        zscore_matrix_file = self.out_folder + "/zscore_matrix.h5"
        # check if the computation for the misassembly score was already done
        if not os.path.isfile(tad_score_file) or not os.path.isfile(zscore_matrix_file):
            ft = hicFindTADs.HicFindTads(hic_file_name, num_processors=self.num_processors, use_zscore=False)
            # adjust window sizes to compute misassembly score (aka tad-score)
            ft.max_depth = max(800000, ft.binsize * 500)
            ft.min_depth = min(200000, ft.binsize * 200)
            ft.step = ft.binsize * 50
            log.debug("zscore window sizes set by hicassembler: ")
            log.debug("max depth:\t{}".format(ft.max_depth))
            log.debug("min depth:\t{}".format(ft.min_depth))
            log.debug("step:\t{}".format(ft.step))
            log.debug("bin size:\t{}".format(ft.binsize))
            ft.hic_ma.matrix.data = np.log1p(ft.hic_ma.matrix.data)
            ft.hic_ma.matrix = ft.hic_ma.convert_to_obs_exp_matrix(perchr=True)
            ft.hic_ma.matrix.data = np.log2(ft.hic_ma.matrix.data)
            ft.compute_spectra_matrix(perchr=True)
            ft.save_bedgraph_matrix(tad_score_file)
            ft.hic_ma.save(zscore_matrix_file)

        log.info("Using previously computed scores: {}\t{}".format(tad_score_file, zscore_matrix_file))
        # TODO here the hic_file is loaded unnecessarily. A way to remove this step would be good
        ft = hicFindTADs.HicFindTads(hic_file_name, num_processors=self.num_processors, use_zscore=False)
        ft.hic_ma = HiCMatrix.hiCMatrix(zscore_matrix_file)
        ft.load_bedgraph_matrix(tad_score_file)
        ft.find_boundaries()

        tuple_ = []
        # find the tad score and position of boundaries with significant pvalues
        for idx, pval in ft.boundaries['pvalues'].iteritems():
            tuple_.append((ft.bedgraph_matrix['chrom'][idx],
                           ft.bedgraph_matrix['chr_start'][idx],
                           ft.bedgraph_matrix['chr_end'][idx],
                           np.mean(ft.bedgraph_matrix['matrix'][idx])))

        scaffold, start, end, tad_score = zip(*tuple_)
        tad_score = np.array(tad_score)
        # compute a zscore of the tad_score to select the lowest ranking boundaries.
        zscore = (tad_score - np.mean(tad_score)) / np.std(tad_score)

        # select as misassemblies all boundaries that have a zscore lower than the threshold
        bin_ids = {}
        bins_to_remove = []
        log.info("Splitting scaffolds using threshold = {}".format(self.misassembly_threshold))
        for idx in np.flatnonzero(zscore < self.misassembly_threshold):
            # find the bins that overlap with the misassembly
            if scaffold[idx] not in self.hic.interval_trees:
                # the scaffold[idx] key may not be present in self.hic because of the
                # reduction of the matrix, which removes some scaffolds
                continue
            if scaffold[idx] not in bin_ids:
                bin_ids[scaffold[idx]] = []
            to_split_intervals = sorted(self.hic.interval_trees[scaffold[idx]][start[idx]:end[idx]])
            bin_ids[scaffold[idx]].extend(sorted([interval_bin.data for interval_bin in to_split_intervals]))

        # split scaffolds based on input file from user
        if split_positions_file is not None:
            log.debug("loading positions to split from {}".format(split_positions_file))
            from hicexplorer import readBed
            bed_file_h = readBed.ReadBed(open(split_positions_file, 'r'))
            for bed in bed_file_h:
                # find the bins that overlap with the misassembly
                if bed.chromosome not in self.hic.interval_trees:
                    log.info("split position {} not found in hic matrix".format(bed))
                    continue
                if bed.chromosome not in bin_ids:
                    bin_ids[bed.chromosome] = []
                to_split_intervals = sorted(self.hic.interval_trees[bed.chromosome][bed.start:bed.end])
                if len(to_split_intervals) == 0:
                    # it could be that there is no bin nearby, so the nearest bin is taken
                    log.info('split position from split list {} does not match any bin. '
                             'Using nearest bin'.format(bed))
                    to_split_intervals = [sorted(self.hic.interval_trees[bed.chromosome][0:bed.end])[-1]]
                    log.info('split position used is {}.'.format(to_split_intervals[0]))
                to_split_intervals = sorted([interval_bin.data for interval_bin in to_split_intervals])
                if len(to_split_intervals) > 1:
                    # if the split contains several bins, the region should be removed from the matrix.
                    # All the bins, except the last one, are marked for deletion. The last one is marked
                    # for split.
                    bins_to_remove.extend(to_split_intervals[:-1])
                    to_split_intervals = [to_split_intervals[-1]]
                bin_ids[bed.chromosome].extend(to_split_intervals)

        # rename cut intervals
        num_removed_misassemblies = 0
        new_cut_intervals = self.hic.cut_intervals[:]
        for scaff_name in bin_ids:
            scaff_bins = self.hic.getChrBinRange(scaff_name)
            # remove splits at the start or end of chromosome as they are most likely
            # false positives
            id_list = set(sorted([x for x in bin_ids[scaff_name]
                                  if x not in [scaff_bins[0], scaff_bins[1] - 1]]))
            part_number = 1
            if len(id_list) > 0:
                log.info("Removing {} misassemblies for {} ".format(len(id_list), scaff_name))
                for matrix_bin in range(scaff_bins[0], scaff_bins[1]):
                    name, cut_start, cut_end, extra = new_cut_intervals[matrix_bin]
                    new_name = "{}/{}".format(name, part_number)
                    new_cut_intervals[matrix_bin] = (new_name, cut_start, cut_end, extra)
                    if matrix_bin in id_list:
                        part_number += 1
                        num_removed_misassemblies += 1

        self.hic.setCutIntervals(new_cut_intervals)
        log.info("{} misassemblies were removed".format(num_removed_misassemblies))

        if len(bins_to_remove) > 0:
            log.info("{} bins will be removed from the matrix because they are "
                     "contained within the split regions.".format(len(bins_to_remove)))
            self.hic.removeBins(bins_to_remove)

    def plot_matrix(self, filename, title='Assembly results', cmap='RdYlBu_r',
                    log1p=True, add_vlines=False, vmax=None, vmin=None):
        """
        Plots the resolved paths on a matrix

        Parameters
        ----------
        filename
        title
        cmap
        log1p
        add_vlines
        vmax
        vmin

        Returns
        -------
        None
        """
        log.debug("plotting matrix")
        import matplotlib.pyplot as plt
        from matplotlib.colors import LogNorm

        fig = plt.figure(figsize=(10, 10))
        hic = self.reorder_matrix()

        axHeat2 = fig.add_subplot(111)
        axHeat2.set_title(title)
        chrbin_boundaries = hic.chrBinBoundaries

        ma = hic.matrix.todense()
        norm = None
        if log1p:
            ma += 1
            norm = LogNorm()

        img3 = axHeat2.imshow(ma, interpolation='nearest', vmax=vmax, vmin=vmin, cmap=cmap, norm=norm)
        img3.set_rasterized(True)

        from mpl_toolkits.axes_grid1 import make_axes_locatable
        divider = make_axes_locatable(axHeat2)
        cax = divider.append_axes("right", size="2.5%", pad=0.09)
        cbar = fig.colorbar(img3, cax=cax)
        cbar.solids.set_edgecolor("face")  # to avoid white lines in the color bar in pdf plots

        ticks = [pos[0] for pos in chrbin_boundaries.values()]
        labels = chrbin_boundaries.keys()
        axHeat2.set_xticks(ticks)
        if len(labels) < 40:
            axHeat2.set_xticklabels(labels, size=3, rotation=90)
        else:
            axHeat2.set_xticklabels(labels, size=1, rotation=90)

        if add_vlines:
            # add lines to demarcate 'super scaffolds'
            vlines = [x[0] for x in hic.chromosomeBinBoundaries.values()]
            axHeat2.vlines(vlines, 1, ma.shape[0], linewidth=0.1)

        axHeat2.set_ylim(ma.shape[0], 0)
        axHeat2.get_yaxis().set_visible(False)
        log.debug("saving matrix {}".format(filename))
        plt.savefig(filename, dpi=300)
        plt.close()

    def remove_noise_from_matrix(self):
        """
        Sets the noise level at the value found in up to 70% of the sparse matrix data.
        The noise is removed from the hic matrix.

        Returns
        -------
        None
        """
        noise_level = np.percentile(self.hic.matrix.data, 70)
        log.debug("noise level set to {}".format(noise_level))

        self.hic.matrix.data = self.hic.matrix.data - noise_level
        self.hic.matrix.data[self.hic.matrix.data < 0] = 0
        self.hic.matrix.eliminate_zeros()

    def get_contig_order(self, add_split_contig_name=False):
        """

        Parameters
        ----------
        add_split_contig_name

        Returns
        -------

        Examples
        --------
        >>> import tempfile
        >>> dirpath = tempfile.mkdtemp(prefix="hicassembler_test_")
        >>> from hicassembler.Scaffolds import get_test_matrix as get_test_matrix
        >>> cut_intervals = [('c-0', 0, 10, 1), ('c-0', 10, 30, 2), ('c-1', 0, 10, 1),
        ... ('c-1', 10, 20, 1), ('c-2/1', 0, 10, 1), ('c-2/2', 10, 30, 1)]
        >>> hic = get_test_matrix(cut_intervals=cut_intervals)
        >>> H = HiCAssembler(hic, "", dirpath, split_misassemblies=False, min_scaffold_length=0)
        >>> H.scaffolds_graph.add_edge_matrix_bins(1,3)
        >>> H.get_contig_order(add_split_contig_name=False)
        [[('c-0', 0, 30, '+'), ('c-1', 0, 20, '-')], [('c-2', 0, 10, '+')], [('c-2', 10, 30, '+')]]
        >>> H.get_contig_order(add_split_contig_name=True)
        [[('c-0', 0, 30, '+'), ('c-1', 0, 20, '-')], [('c-2/1', 0, 10, '+')], [('c-2/2', 10, 30, '+')]]
        >>> import shutil
        >>> shutil.rmtree(dirpath)
        """
        import re
        super_scaffolds = []

        for path in self.scaffolds_graph.scaffold.get_all_paths():
            scaffold = []
            for scaff_name in path:
                scaff_data = self.scaffolds_graph.scaffold.node[scaff_name]
                if add_split_contig_name is False:
                    # check if node name has an indication that it was split (by ending in '/n')
                    res = re.search("(.*?)/(\d+)$", scaff_name)
                    if res is not None:
                        scaff_name = res.group(1)

                scaffold.append((scaff_name, scaff_data['start'], scaff_data['end'], scaff_data['direction']))
            super_scaffolds.append(scaffold)

        # sanity check
        def get_start_end_direction(_scaff_name, _bin_list, _start_list, _end_list):
            # check that the path in the scaffold is the same as the bin_list
            assert self.scaffolds_graph.scaffold.node[_scaff_name]['path'] == _bin_list

            # check direction of scaffold. If the bin ids are decreasing
            # then the direction is "-"
            _scaff_start = _scaff_end = _direction = None
            if len(_bin_list) == 1:
                _direction = "+"
                _scaff_start = _start_list[0]
                _scaff_end = _end_list[0]
            elif all(y - x == -1 for x, y in zip(_bin_list, _bin_list[1:])):
                _direction = "-"
                _scaff_start = min(_start_list)
                _scaff_end = max(_end_list)
                assert _scaff_start == _start_list[-1]
                assert _scaff_end == _end_list[0]
            elif all(y - x == 1 for x, y in zip(_bin_list, _bin_list[1:])):
                _direction = "+"
                _scaff_start = min(_start_list)
                _scaff_end = max(_end_list)
                assert _scaff_start == _start_list[0]
                assert _scaff_end == _end_list[-1]
            else:
                # in this case the bins are not continuous. How did that happen?
                sys.stderr.write('Bins are not continuous. How did that happen?')
            return _scaff_start, _scaff_end, _direction

        scaff_order = {}
        gaps = {}
        for idx, matrix_bin_path in enumerate(self.scaffolds_graph.matrix_bins.get_all_paths()):
            scaff_order[idx] = []
            prev_scaff_name = None
            gaps[idx] = []
            start_list = []
            end_list = []
            bin_list = []
            # matrix_bin_path is a list containing all the bins that form a path;
            # those bins are part of scaffolds
            for bin_id in matrix_bin_path:
                # get the scaffold name
                scaff_name, start, end, extra = self.hic.getBinPos(bin_id)
                bin_data = self.scaffolds_graph.matrix_bins.node[bin_id]
                assert bin_data['name'] == scaff_name
                assert bin_data['start'] == start
                assert bin_data['end'] == end

                if scaff_name != prev_scaff_name and prev_scaff_name is not None:
                    scaff_start, scaff_end, direction = get_start_end_direction(prev_scaff_name, bin_list,
                                                                                start_list, end_list)
                    scaff_order[idx].append((prev_scaff_name, scaff_start, scaff_end, direction))
                    start_list = []
                    end_list = []
                    bin_list = []
                start_list.append(start)
                end_list.append(end)
                bin_list.append(bin_id)
                prev_scaff_name = scaff_name

            scaff_start, scaff_end, direction = get_start_end_direction(scaff_name, bin_list, start_list, end_list)
            scaff_order[idx].append((scaff_name, scaff_start, scaff_end, direction))

        # scaffolds that were removed and could not be put back need to be returned as well
        for scaff in self.scaffolds_graph.removed_scaffolds.node.values():
            idx += 1
            scaff_order[idx] = [(scaff['name'], scaff['start'], scaff['end'], '+')]

        if add_split_contig_name is False:
            scaff_order_renamed = {}
            for idx, scaff_path in scaff_order.iteritems():
                scaff_order_renamed[idx] = []
                for scaff_name, scaff_start, scaff_end, scaff_direction in scaff_path:
                    # check if node name has an indication that it was split (by ending in '/n')
                    res = re.search("(.*?)/(\d+)$", scaff_name)
                    if res is not None:
                        scaff_name = res.group(1)
                    scaff_order_renamed[idx].append((scaff_name, scaff_start, scaff_end, scaff_direction))
            scaff_order = scaff_order_renamed

        # brute force comparison
        not_found_list = {}
        for idx, hic_scaff_order in scaff_order.iteritems():
            not_found_list[idx] = []
            match = False
            for super_scaff in super_scaffolds:
                if hic_scaff_order == super_scaff:
                    match = True
                    break
            if match is False:
                not_found_list[idx].append(hic_scaff_order)
        log.debug(not_found_list)

        return scaff_order.values()

    def reorder_matrix(self, max_num_bins=4000, rename_scaffolds=False):
        """
        Reorders the matrix using the assembled paths

        max_num_bins : since reorder is used mostly for plotting, it is required that
                       the matrices are not too large, thus a maximum number of bins can be set.
        rename_scaffolds : set to True if the original scaffold names that are already merged
                           should be renamed as hic_scaffold_{n} where n is a counter

        Returns
        -------
        """
        import re

        def sorted_nicely(list_to_order):
            """Sort the given iterable in the way that humans expect."""
            convert = lambda text: int(text) if text.isdigit() else text
            alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
            return sorted(list_to_order, key=alphanum_key)

        log.debug("reordering matrix")
        hic = copy.deepcopy(self.scaffolds_graph.hic)
        order_list = []
        from collections import OrderedDict
        scaff_boundaries = OrderedDict()
        start_bin = 0
        end_bin = 0

        num_bins_to_merge = hic.matrix.shape[0] / max_num_bins
        # reduce the density of the matrix if this one is too big
        if hic.matrix.shape[0] > max_num_bins and num_bins_to_merge > 1:
            # compute number of bins required to reduce resolution to desired goal
            num_bins_to_merge = hic.matrix.shape[0] / max_num_bins
            log.debug("Matrix size is too large for printing. Reducing the matrix by merging {} bins".
                      format(num_bins_to_merge))
            hic, map_old_to_merged = HiCAssembler.merge_bins(hic, num_bins_to_merge, skip_small=False,
                                                             return_bin_id_mapping=True)
        else:
            map_old_to_merged = None

        # check if scaffolds are already merged, and if not
        # sort the names alphanumerically.
        if self.scaffolds_graph.scaffold.path == {}:
            scaffold_order = sorted_nicely(list([x for x in self.scaffolds_graph.scaffold]))
            # after merging, small scaffolds will be removed from the matrix. They need
            # to be removed from scaffold_order before reordering the chromosomes to avoid an error
            scaffold_order = [x for x in scaffold_order if x in hic.chrBinBoundaries.keys()]
            hic.reorderChromosomes(scaffold_order)
            hic.chromosomeBinBoundaries = hic.chrBinBoundaries
        else:
            path_list_test = {}
            for idx, scaff_path in enumerate(self.scaffolds_graph.scaffold.get_all_paths()):
                # scaff_path looks like:
                # ['scaffold_12970/3', 'scaffold_12472/3', 'scaffold_12932/3', 'scaffold_12726/3', 'scaffold_12726/1']
                path_list_test[idx] = []
                for scaffold_name in scaff_path:
                    bin_path = self.scaffolds_graph.scaffold.node[scaffold_name]['path']
                    if map_old_to_merged is not None:
                        new_bin_path = []
                        seen = set()
                        for bin_id in bin_path:
                            if map_old_to_merged[bin_id] not in seen:
                                new_bin_path.append(map_old_to_merged[bin_id])
                                seen.add(map_old_to_merged[bin_id])
                    else:
                        new_bin_path = bin_path
                    order_list.extend(new_bin_path)
                    path_list_test[idx].extend(new_bin_path)
                    end_bin += len(new_bin_path)

                # assert path_list_test[idx] == self.scaffolds_graph.matrix_bins[path_list_test[idx][0]]
                scaff_boundaries["scaff_{}".format(idx)] = (start_bin, end_bin)
                start_bin = end_bin

            hic.reorderBins(order_list)
            hic.chromosomeBinBoundaries = scaff_boundaries

        if rename_scaffolds is True:
            new_intervals = []
            start_list = []
            for idx, scaff_id in enumerate(hic.chromosomeBinBoundaries):
                start_bin, end_bin = hic.chromosomeBinBoundaries[scaff_id]
                start = 0
                for interval in hic.cut_intervals[start_bin:end_bin]:
                    scaff_name, int_start, int_end, cov = interval
                    end = start + (int_end - int_start)
                    new_intervals.append(("hic_scaffold_{}".format(idx + 1), start, end, cov))
                    start_list.append((start, end, int_start, int_end, int_end - int_start))
                    start = end

            hic.setCutIntervals(new_intervals)
        return hic

    @staticmethod
    def merge_bins(hic, num_bins, skip_small=True, return_bin_id_mapping=False):
        """
        Merge the bins using the specified number of bins.
This functions takes care to make new intervals Parameters ---------- hic : HiCMatrix object num_bins : number of consecutive bins to merge. Returns ------- A sparse matrix. Set up a Hi-C test matrix >>> from scipy.sparse import csr_matrix >>> row, col = np.triu_indices(5) >>> cut_intervals = [('a', 0, 10, 0.5), ('a', 10, 20, 1), ... ('a', 20, 30, 1), ('a', 30, 40, 0.1), ('b', 40, 50, 1)] >>> hic = HiCMatrix.hiCMatrix() >>> hic.nan_bins = [] >>> matrix = np.array([ ... [ 50, 10, 5, 3, 0], ... [ 0, 60, 15, 5, 1], ... [ 0, 0, 80, 7, 3], ... [ 0, 0, 0, 90, 1], ... [ 0, 0, 0, 0, 100]], dtype=np.int32) make the matrix symmetric: >>> from scipy.sparse import dia_matrix >>> dia = dia_matrix(([matrix.diagonal()], [0]), shape=matrix.shape) >>> hic.matrix = csr_matrix(matrix + matrix.T - dia) >>> hic.setMatrix(hic.matrix, cut_intervals) run merge_matrix >>> merge_matrix, map_id = HiCAssembler.merge_bins(hic, 2, return_bin_id_mapping=True) >>> merge_matrix.cut_intervals [('a', 0, 20, 0.75), ('a', 20, 40, 0.55000000000000004), ('b', 40, 50, 1.0)] >>> merge_matrix.matrix.todense() matrix([[120, 28, 1], [ 28, 177, 4], [ 1, 4, 100]], dtype=int32) >>> map_id {0: 0, 1: 0, 2: 1, 3: 1, 4: 2} """ hic = hicexplorer.hicMergeMatrixBins.remove_nans_if_needed(hic) # get the bins to merge ref_name_list, start_list, end_list, coverage_list = zip(*hic.cut_intervals) new_bins = [] bins_to_merge = [] prev_ref = ref_name_list[0] # prepare new intervals idx_start = 0 new_start = start_list[0] count = 0 merge_bin_id = 0 mapping_old_to_merged_bin_ids = {} for idx, ref in enumerate(ref_name_list): if (count > 0 and count % num_bins == 0) or ref != prev_ref: if skip_small is True and count < num_bins / 2: sys.stderr.write("{} has few bins ({}). Skipping it\n".format(prev_ref, count)) else: coverage = np.mean(coverage_list[idx_start:idx]) new_end = end_list[idx - 1] if new_start > new_end: sys.stderr.write("end of new merged bin is smaller than start") new_bins.append((ref_name_list[idx_start], new_start, end_list[idx - 1], coverage)) bins_to_merge.append(list(range(idx_start, idx))) for old_bin_id in list(range(idx_start, idx)): mapping_old_to_merged_bin_ids[old_bin_id] = merge_bin_id merge_bin_id += 1 idx_start = idx new_start = start_list[idx] count = 0 prev_ref = ref count += 1 if skip_small is True and count < num_bins / 2: sys.stderr.write("{} has few bins ({}). Skipping it\n".format(prev_ref, count)) else: coverage = np.mean(coverage_list[idx_start:]) new_end = end_list[idx - 1] if new_start > new_end: sys.stderr.write("end of new merged bin is smaller than start") new_bins.append((ref, new_start, end_list[idx], coverage)) bins_to_merge.append(list(range(idx_start, idx + 1))) for old_bin_id in list(range(idx_start, idx + 1)): mapping_old_to_merged_bin_ids[old_bin_id] = merge_bin_id merge_bin_id += 1 hic.matrix = hicexplorer.hicMergeMatrixBins.reduce_matrix(hic.matrix, bins_to_merge, diagonal=True) hic.matrix.eliminate_zeros() hic.setCutIntervals(new_bins) hic.nan_bins = np.flatnonzero(hic.matrix.sum(0).A == 0) if return_bin_id_mapping is True: return hic, mapping_old_to_merged_bin_ids else: return hic class HiCAssemblerException(Exception): """Base class for exceptions in HiCAssembler."""
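The `remove_noise_from_matrix` method above subtracts a percentile-based noise floor from the sparse contact matrix. A minimal standalone sketch of that thresholding step, assuming only numpy and scipy (no HiCMatrix object, and illustrative values):

```python
import numpy
from scipy.sparse import csr_matrix

counts = csr_matrix(numpy.array([[0, 1, 2],
                                 [1, 0, 9],
                                 [2, 9, 0]], dtype=float))
noise_level = numpy.percentile(counts.data, 70)  # noise floor, as in remove_noise_from_matrix
counts.data = counts.data - noise_level
counts.data[counts.data < 0] = 0  # clip entries driven negative by the subtraction
counts.eliminate_zeros()          # drop explicit zeros so the matrix stays sparse
```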
PypiClean
/Django_Verify_Email-2.0.3-py3-none-any.whl/verify_email/app_configurations.py
from django.conf import settings class GetFieldFromSettings: """ This class fetches the attributes that are defined in settings.py of your project by user OR Django itself. self.default_configs : is a dict with keys as the names used in this app and values being a tuple of attributes defined in settings.py and their corresponding default values if not found. There is a special case in "get" method, if you set "VERIFICATION_SUCCESS_TEMPLATE" as None is settings.py, it will skip the intermediate page where success information is displayed. (This is better explained in docs.) The "get" method takes the name of the attributes as input, checks for it in settings.py, if found: returns the corresponding value. else: returns the default value from "self.defaults_configs". """ def __init__(self): self.defaults_configs = { 'debug_settings': ( 'DEBUG', False ), 'subject': ( "SUBJECT", "Email Verification Mail" ), 'email_field_name': ( "EMAIL_FIELD_NAME", "email", ), 'html_message_template': ( "HTML_MESSAGE_TEMPLATE", 'verify_email/email_verification_msg.html' ), 'from_alias': ( "DEFAULT_FROM_EMAIL", 'noreply<[email protected]>', ), 'login_page': ( 'LOGIN_URL', 'accounts_login' ), 'verification_success_template': ( 'VERIFICATION_SUCCESS_TEMPLATE', 'verify_email/email_verification_successful.html' ), 'verification_success_msg': ( 'VERIFICATION_SUCCESS_MSG', "Your Email is verified successfully and account has been activated. " "You can login with the credentials now..." ), 'verification_failed_template': ( 'VERIFICATION_FAILED_TEMPLATE', 'verify_email/email_verification_failed.html' ), 'link_expired_template': ( 'LINK_EXPIRED_TEMPLATE', 'verify_email/link_expired.html' ), 'verification_failed_msg': ( 'VERIFICATION_FAILED_MSG', "There is something wrong with this link, can't verify the user..." ), 'request_new_email_template': ( 'REQUEST_NEW_EMAIL_TEMPLATE', 'verify_email/request_new_email.html' ), 'new_email_sent_template': ( 'NEW_EMAIL_SENT_TEMPLATE', 'verify_email/new_email_sent.html' ), 'salt': ( 'HASH_SALT', None ), 'sep': ( 'SEPARATOR', ':' ), 'key': ( 'HASHING_KEY', None ), 'max_age': ( 'EXPIRE_AFTER', None ), 'max_retries': ( 'MAX_RETRIES', 2 ) } def get(self, field_name, raise_exception=True, default_type=str): attr = getattr( settings, self.defaults_configs[field_name][0], # get field from settings self.defaults_configs[field_name][1] # get default value if field not defined ) if (attr == '' or attr is None or not isinstance(field_name, default_type)) and raise_exception: if field_name == 'verification_success_template' and attr is None: return None raise AttributeError return attr
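A minimal usage sketch of `GetFieldFromSettings`, assuming a configured Django settings module; the values shown are the defaults from `self.defaults_configs`:

```python
from verify_email.app_configurations import GetFieldFromSettings

options = GetFieldFromSettings()

# Falls back to the default when SUBJECT is not defined in settings.py:
subject = options.get('subject')  # "Email Verification Mail"

# Settings that default to None must be read with raise_exception=False,
# otherwise the empty value raises AttributeError:
expiry = options.get('max_age', raise_exception=False)  # None unless EXPIRE_AFTER is set
```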
PypiClean
/AdjectorClient-1.0b1.tar.gz/AdjectorClient-1.0b1/adjector/model/entities.py
import logging from datetime import datetime from elixir import using_options, using_table_options, BLOB, Boolean, ColumnProperty, \ DateTime, Entity, EntityMeta, Field, Float, Integer, ManyToMany, ManyToOne, \ OneToMany, OneToOne, SmallInteger, String, UnicodeText from genshi import Markup from sqlalchemy import func, UniqueConstraint from adjector.core.conf import conf from adjector.core.tracking import add_tracking, remove_tracking log = logging.getLogger(__name__) max_int = 2147483647 tz_now = lambda : datetime.now(conf.timezone) UnicodeText = UnicodeText(assert_unicode=False) class CircularDependencyException(Exception): pass class GenericEntity(object): def __init__(self, data): self._updated = self.set(data) def set(self, data): for field in data.keys(): if hasattr(self, field): if field == 'title': data[field] = data[field][:80] self.__setattr__(field, data[field]) else: log.warning('No field: %s' % field) def value(self): return self.__dict__ class GenericListEntity(GenericEntity): def set(self, data): GenericEntity.set(self, data) # Detect cycles in parenting - Brent's algorithm http://www.siafoo.net/algorithm/11 turtle = self rabbit = self steps_taken = 0 step_limit = 2 while True: if not rabbit.parent_id: break #no loop rabbit = rabbit.query.get(rabbit.parent_id) steps_taken += 1 if rabbit == turtle: # loop! raise CircularDependencyException if steps_taken == step_limit: steps_taken = 0 step_limit *=2 turtle = rabbit class CJIgnoredLink(Entity): cj_advertiser_id = Field(Integer, required=True) cj_link_id = Field(Integer, required=True) using_options(tablename=conf.table_prefix + 'cj_ignored_links') using_table_options(UniqueConstraint('cj_link_id')) def __init__(self, link_id, advertiser_id): self.cj_advertiser_id = advertiser_id self.cj_link_id = link_id class Click(Entity): time = Field(DateTime(timezone=True), required=True, default=tz_now) creative = ManyToOne('Creative', ondelete='set null') zone = ManyToOne('Zone', ondelete='set null') using_options(tablename=conf.table_prefix + 'clicks') def __init__(self, creative_id, zone_id): self.creative_id = creative_id self.zone_id = zone_id class Creative(GenericEntity, Entity): parent = ManyToOne('Set', required=False, ondelete='set null') #zones = ManyToMany('Zone', tablename='creatives_to_zones') creative_zone_pairs = OneToMany('CreativeZonePair', cascade='delete') title = Field(String(80, convert_unicode=True), required=True) html = Field(UnicodeText, required=True, default='') is_text = Field(Boolean, required=True, default=False) width = Field(Integer, required=True, default=0) height = Field(Integer, required=True, default=0) start_date = Field(DateTime(timezone=True)) end_date = Field(DateTime(timezone=True)) weight = Field(Float, required=True, default=1.0) add_tracking = Field(Boolean, required=True, default=True) disabled = Field(Boolean, required=True, default=False) create_date = Field(DateTime(timezone=True), required=True, default=tz_now) cj_link_id = Field(Integer) cj_advertiser_id = Field(Integer) cj_site_id = Field(Integer) views = OneToMany('View') clicks = OneToMany('Click') # Cached Values html_tracked = Field(UnicodeText) #will be overwritten on set parent_weight = Field(Float, required=True, default=1.0) # overwritten on any parent weight change using_options(tablename=conf.table_prefix + 'creatives', order_by='title') using_table_options(UniqueConstraint('cj_link_id')) def __init__(self, data): GenericEntity.__init__(self, data) if self.parent_id: self.parent_weight = Set.get(self.parent_id).weight def 
get_clicks(self, start=None, end=None): query = Click.query.filter_by(creative_id = self.id) if start: query = query.filter(Click.time > start) if end: query = query.filter(Click.time < end) return query.count() def get_views(self, start=None, end=None): query = View.query.filter_by(creative_id = self.id) if start: query = query.filter(View.time > start) if end: query = query.filter(View.time < end) return query.count() @staticmethod def possible_parents(this=None): return [[set.id, set.title] for set in Set.query()] def set(self, data): old_parent_id = self.parent_id old_html = self.html old_add_tracking = self.add_tracking GenericEntity.set(self, data) if self.parent_id != old_parent_id: self.parent_weight = Set.get(self.parent_id).weight # TODO: Handle Block / Text bullshit # Parse html if self.html != old_html or self.add_tracking != old_add_tracking: if self.add_tracking is not False: self.html_tracked = add_tracking(self.html) else: self.html_tracked = None return [self] def value(self): value = GenericEntity.value(self) value['preview'] = Markup(remove_tracking(self.html, self.cj_site_id)) value['total_weight'] = self.weight * self.parent_weight value['html_tracked'] = value['html_tracked'] or value['html'] return value def view(self): return '%s/creative/%i' % (conf.admin_base_url, self.id) class CreativeZonePair(GenericEntity, Entity): creative = ManyToOne('Creative', ondelete='cascade', use_alter=True) zone = ManyToOne('Zone', ondelete='cascade', use_alter=True) is_text = Field(Boolean, required=True) lower_bound = Field(Float, required=True) upper_bound = Field(Float, required=True) using_options(tablename=conf.table_prefix + 'creative_zone_pairs') using_table_options(UniqueConstraint('creative_id', 'zone_id')) class Location(GenericListEntity, Entity): ''' A container for locations or zones ''' parent = ManyToOne('Location', required=False, ondelete='set null') sublocations = OneToMany('Location') zones = OneToMany('Zone') title = Field(String(80, convert_unicode=True), required=True) description = Field(UnicodeText) create_date = Field(DateTime(timezone=True), required=True, default=tz_now) cj_site_id = Field(Integer) parent_cj_site_id = Field(Integer) using_options(tablename=conf.table_prefix + 'locations', order_by='title') def __init__(self, data): GenericEntity.__init__(self, data) if self.parent_id: self.parent_cj_site_id = Location.get(self.parent_id).cj_site_id def delete(self, data): updated = [] for subloc in self.sublocations: updated.extend(subloc.set(dict(parent_cj_site_id = None))) for zone in self.zones: updated.extend(zone.set(dict(parent_cj_site_id = None))) Entity.delete(self) return updated @staticmethod def possible_parents(this = None): filter = None if this: filter = Location.id != this.id return [[location.id, location.title] for location in Location.query.filter(filter)] def set(self, data): updated = [self] old_parent_id = self.parent_id old_cj_site_id = self.cj_site_id old_parent_cj_site_id = self.parent_cj_site_id GenericEntity.set(self, data) if self.parent_id != old_parent_id: self.parent_cj_site_id = Location.get(self.parent_id).cj_site_id if self.cj_site_id != old_cj_site_id or self.parent_cj_site_id != old_parent_cj_site_id: # Only pass parent- down if we don't have our own for subloc in self.sublocations: updated.extend(subloc.set(dict(parent_cj_site_id = self.cj_site_id or self.parent_cj_site_id))) for zone in self.zones: updated.extend(zone.set(dict(parent_cj_site_id = self.cj_site_id or self.parent_cj_site_id))) return updated def view(self): 
return '%s/location/%i' % (conf.admin_base_url, self.id) class Set(GenericListEntity, Entity): parent = ManyToOne('Set', required=False, ondelete='set null') subsets = OneToMany('Set') creatives = OneToMany('Creative') title = Field(String(80, convert_unicode=True), required=True) description = Field(UnicodeText) weight = Field(Float, required=True, default=1.0) parent_weight = Field(Float, required=True, default=1.0) # overwritten on any parent weight change create_date = Field(DateTime(timezone=True), required=True, default=tz_now) cj_advertiser_id = Field(Integer) using_options(tablename=conf.table_prefix + 'sets', order_by='title') using_table_options(UniqueConstraint('cj_advertiser_id')) def __init__(self, data): GenericEntity.__init__(self, data) if self.parent_id: self.parent_weight = Set.get(self.parent_id).weight def delete(self, data): updated = [] for subset in self.subsets: updated.extend(subset.set(dict(parent_weight = 1.0))) for creative in self.creatives: updated.extend(creative.set(dict(parent_weight = 1.0))) Entity.delete(self) return updated @staticmethod def possible_parents(this = None): filter = None if this: filter = Set.id != this.id return [[set.id, set.title] for set in Set.query.filter(filter)] def set(self, data): updated = [self] old_parent_id = self.parent_id old_weight = self.weight old_parent_weight = self.parent_weight GenericEntity.set(self, data) if self.parent_id != old_parent_id: self.parent_weight = Set.get(self.parent_id).weight if self.weight != old_weight or self.parent_weight != old_parent_weight: for subset in self.subsets: updated.extend(subset.set(dict(parent_weight = self.parent_weight * self.weight))) for creative in self.creatives: updated.extend(creative.set(dict(parent_weight = self.parent_weight * self.weight))) return updated def value(self): value = GenericEntity.value(self) value['total_weight'] = self.weight * self.parent_weight return value def view(self): return '%s/set/%i' % (conf.admin_base_url, self.id) class View(GenericEntity, Entity): time = Field(DateTime(timezone=True), required=True, default=tz_now) creative = ManyToOne('Creative', ondelete='set null') zone = ManyToOne('Zone', ondelete='set null') using_options(tablename=conf.table_prefix + 'views') def __init__(self, creative_id, zone_id): self.creative_id = creative_id self.zone_id = zone_id class Zone(GenericEntity, Entity): parent = ManyToOne('Location', required=False, ondelete='set null') creative_zone_pairs = OneToMany('CreativeZonePair', cascade='delete') name = Field(String(80, convert_unicode=True), required=False) title = Field(String(80, convert_unicode=True), required=True) description = Field(UnicodeText) #creatives = ManyToMany('Creative', tablename='creatives_to_zones') normalize_by_container = Field(Boolean, required=True, default=False) creative_types = Field(SmallInteger, required=True, default=0) #0: Both, 1: Text, 2: Blocks # These only matter if blocks allowed min_width = Field(Integer, required=True, default=0) max_width = Field(Integer, required=True, default=max_int) min_height = Field(Integer, required=True, default=0) max_height = Field(Integer, required=True, default=max_int) # These only matter if text allowed num_texts = Field(SmallInteger, required=True, default=1) weight_texts = Field(Float, required=True, default=1.0) before_all_text = Field(UnicodeText) after_all_text = Field(UnicodeText) before_each_text = Field(UnicodeText) after_each_text = Field(UnicodeText) create_date = Field(DateTime(timezone=True), required=True, default=tz_now) # 
Cached from parent parent_cj_site_id = Field(Integer) # Cached from creatives total_text_weight = Field(Float) # i dunno, some default? should be updated quick. views = OneToMany('View') clicks = OneToMany('Click') using_options(tablename=conf.table_prefix + 'zones', order_by='title') def __init__(self, data): GenericEntity.__init__(self, data) if self.parent_id: self.parent_cj_site_id = Location.get(self.parent_id).cj_site_id def get_clicks(self, start=None, end=None): query = Click.query.filter_by(zone_id = self.id) if start: query = query.filter(Click.time > start) if end: query = query.filter(Click.time < end) return query.count() def get_views(self, start=None, end=None): query = View.query.filter_by(zone_id = self.id) if start: query = query.filter(View.time > start) if end: query = query.filter(View.time < end) return query.count() @staticmethod def possible_parents(this=None): return [[location.id, location.title] for location in Location.query()] def set(self, data): if data.has_key('previous_name'): del data['previous_name'] old_parent_id = self.parent_id GenericEntity.set(self, data) if self.parent_id != old_parent_id: self.parent_cj_site_id = Location.get(self.parent_id).cj_site_id return [self] def value(self): val = self.__dict__.copy() val['previous_name'] = self.name return val def view(self): return '%s/zone/%i' % (conf.admin_base_url, self.id)
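`GenericListEntity.set` above guards against circular parenting with Brent's cycle-detection algorithm. A standalone sketch of the same idea over a plain parent-pointer mapping (all names here are illustrative):

```python
def has_parent_cycle(start, parent_of):
    """Brent's algorithm: detect a cycle in a parent-pointer chain (None = root)."""
    turtle = rabbit = start
    steps_taken, step_limit = 0, 2
    while True:
        parent = parent_of.get(rabbit)
        if parent is None:
            return False          # reached a root, so no loop
        rabbit = parent
        steps_taken += 1
        if rabbit == turtle:
            return True           # rabbit met the saved position: loop
        if steps_taken == step_limit:
            steps_taken = 0
            step_limit *= 2       # double the search window
            turtle = rabbit       # teleport the turtle to the rabbit


assert has_parent_cycle('a', {'a': 'b', 'b': 'c', 'c': 'a'})
assert not has_parent_cycle('a', {'a': 'b', 'b': None})
```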
PypiClean
/NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/data/detection/coco/coco_evaluation.py
__author__ = "Christian Heider Nielsen" __doc__ = r""" Created on 22/03/2020 """ import copy import json import logging from collections import defaultdict, namedtuple from datetime import datetime from enum import Enum from pathlib import Path from typing import Any, Dict, List, Sequence, Tuple import numpy import pycocotools.mask import torch import torch._six import torchvision from pycocotools.coco import COCO # Version 2.0 REQUIRES numpy 1.17 from pycocotools.cocoeval import COCOeval from warg import IncompatiblePackageVersions if pycocotools.coco.__version__ == "2.0" and "1.18" in numpy.__version__: print("Hint: downgrade numpy to 1.17.x") raise IncompatiblePackageVersions( numpy, "pycocotools", pycocotools=pycocotools.coco.__version__ ) from draugr.torch_utilities import minmax_to_xywh_torch from neodroidvision.utilities.torch_utilities.distributing.distributing_utilities import ( all_gather_cuda, ) __all__ = [ "CocoEvaluator", "merge", "create_common_coco_eval", "create_index", "load_results", "coco_evaluation", "get_iou_types", ] BboxPredTuple = namedtuple("BboxPredTuple", ("boxes", "scores", "labels")) SegmPredTuple = namedtuple("SegmPredTuple", ("masks", "scores", "labels")) KeypointsPredTuple = namedtuple("KeypointsPredTuple", ("keypoints", "scores", "labels")) class IouType(Enum): BoundingBox = "bbox" Segmentation = "segm" Keypoints = "keypoints" class CocoEvaluator(object): """description""" def __init__(self, coco_api: COCO, iou_types: Sequence[IouType]): assert isinstance(iou_types, (list, tuple)) self.coco_api = copy.deepcopy(coco_api) self.iou_types = iou_types self.coco_eval = {} for iou_type in iou_types: assert iou_type in IouType self.coco_eval[iou_type] = COCOeval(self.coco_api, iouType=iou_type.value) self.img_ids = [] self.eval_imgs = {k: [] for k in iou_types} def update(self, predictions: Dict) -> None: """ :param predictions: :type predictions:""" img_ids = list(numpy.unique(list(predictions.keys()))) self.img_ids.extend(img_ids) for iou_type in self.iou_types: results = self.prepare_data(predictions, iou_type) coco_dt = load_results(self.coco_api, results) if results else COCO() coco_eval = self.coco_eval[iou_type] coco_eval.cocoDt = coco_dt coco_eval.params.imgIds = list(img_ids) img_ids, eval_imgs = evaluate(coco_eval) self.eval_imgs[iou_type].append(eval_imgs) def synchronize_between_processes(self): """description""" for iou_type in self.iou_types: self.eval_imgs[iou_type] = numpy.concatenate(self.eval_imgs[iou_type], 2) create_common_coco_eval( self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type] ) def accumulate(self): """description""" for coco_eval in self.coco_eval.values(): coco_eval.accumulate() def summarize(self): """description""" for iou_type, coco_eval in self.coco_eval.items(): print(f"IoU metric: {iou_type}") coco_eval.summarize() def prepare_data( self, predictions: Sequence, iou_type: IouType ) -> List[Dict[str, Any]]: """ :param predictions: :type predictions: :param iou_type: :type iou_type: :return: :rtype:""" if iou_type == iou_type.BoundingBox: return self.prepare_for_coco_detection(predictions) elif iou_type == iou_type.Segmentation: return self.prepare_for_coco_segmentation(predictions) elif iou_type == iou_type.Keypoints: return self.prepare_for_coco_keypoint(predictions) else: raise ValueError(f"Unknown iou type {iou_type}") def prepare_for_coco_detection(self, predictions: Sequence[BboxPredTuple]): """ :param predictions: :type predictions: :return: :rtype:""" coco_results = [] for original_id, prediction in 
predictions.items(): if len(prediction) == 0: continue boxes = prediction["boxes"] boxes = minmax_to_xywh_torch(boxes).tolist() scores = prediction["scores"].tolist() labels = prediction["labels"].tolist() coco_results.extend( [ { "image_id": original_id, "category_id": labels[k], "bbox": box, "score": scores[k], } for k, box in enumerate(boxes) ] ) return coco_results def prepare_for_coco_segmentation(self, predictions: Sequence[SegmPredTuple]): """ :param predictions: :type predictions: :return: :rtype:""" coco_results = [] for original_id, prediction in predictions.items(): if len(prediction) == 0: continue scores = prediction["scores"] labels = prediction["labels"] masks = prediction["masks"] masks = masks > 0.5 scores = prediction["scores"].tolist() labels = prediction["labels"].tolist() rles = [ pycocotools.mask.encode( numpy.array(mask[0, :, :, numpy.newaxis], order="F") )[0] for mask in masks ] for rle in rles: rle["counts"] = rle["counts"].decode("utf-8") coco_results.extend( [ { "image_id": original_id, "category_id": labels[k], "segmentation": rle, "score": scores[k], } for k, rle in enumerate(rles) ] ) return coco_results def prepare_for_coco_keypoint(self, predictions: Sequence[KeypointsPredTuple]): """ :param predictions: :type predictions: :return: :rtype:""" coco_results = [] for original_id, prediction in predictions.items(): if len(prediction) == 0: continue scores = prediction["scores"].tolist() labels = prediction["labels"].tolist() keypoints = prediction["keypoints"] keypoints = keypoints.flatten(start_dim=1).tolist() coco_results.extend( [ { "image_id": original_id, "category_id": labels[k], "keypoints": keypoint, "score": scores[k], } for k, keypoint in enumerate(keypoints) ] ) return coco_results def evaluate(iou_type_evaluator: COCOeval) -> Tuple: """ Run per image evaluation on given images and store results (a list of dict) in self.evalImgs :return: None""" p = iou_type_evaluator.params # add backward compatibility if useSegm is specified in params if p.useSegm is not None: p.iouType = "segm" if p.useSegm == 1 else "bbox" print(f"useSegm (deprecated) is not None. 
Running {p.iouType} evaluation") # print('Evaluate annotation type *{}*'.format(p.iouType)) p.imgIds = list(numpy.unique(p.imgIds)) if p.useCats: p.catIds = list(numpy.unique(p.catIds)) p.maxDets = sorted(p.maxDets) iou_type_evaluator.params = p iou_type_evaluator._prepare() # loop through images, area range, max detection number cat_ids = p.catIds if p.useCats else [-1] compute_iou = None if p.iouType == "segm" or p.iouType == "bbox": compute_iou = iou_type_evaluator.computeIoU elif p.iouType == "keypoints": compute_iou = iou_type_evaluator.computeOks iou_type_evaluator.ious = { (imgId, catId): compute_iou(imgId, catId) for imgId in p.imgIds for catId in cat_ids } evaluate_img = iou_type_evaluator.evaluateImg max_det = p.maxDets[-1] eval_imgs = [ evaluate_img(img_id, cat_id, area_rng, max_det) for cat_id in cat_ids for area_rng in p.areaRng for img_id in p.imgIds ] eval_imgs = numpy.asarray( eval_imgs ).reshape( # this is NOT in the pycocotools code, but could be done outside len(cat_ids), len(p.areaRng), len(p.imgIds) ) iou_type_evaluator._paramsEval = copy.deepcopy(iou_type_evaluator.params) return p.imgIds, eval_imgs def merge(img_ids, eval_imgs): """ :param img_ids: :type img_ids: :param eval_imgs: :type eval_imgs: :return: :rtype:""" all_img_ids = all_gather_cuda(img_ids) all_eval_imgs = all_gather_cuda(eval_imgs) merged_img_ids = [] for p in all_img_ids: merged_img_ids.extend(p) merged_eval_imgs = [] for p in all_eval_imgs: merged_eval_imgs.append(p) merged_img_ids = numpy.array(merged_img_ids) merged_eval_imgs = numpy.concatenate(merged_eval_imgs, 2) # keep only unique (and in sorted order) images merged_img_ids, idx = numpy.unique(merged_img_ids, return_index=True) merged_eval_imgs = merged_eval_imgs[..., idx] return merged_img_ids, merged_eval_imgs def create_common_coco_eval(coco_eval, img_ids, eval_imgs): """ :param coco_eval: :type coco_eval: :param img_ids: :type img_ids: :param eval_imgs: :type eval_imgs:""" img_ids, eval_imgs = merge(img_ids, eval_imgs) img_ids = list(img_ids) eval_imgs = list(eval_imgs.flatten()) coco_eval.evalImgs = eval_imgs coco_eval.params.img_ids = img_ids coco_eval._paramsEval = copy.deepcopy(coco_eval.params) ################################################################# # From pycocotools, just removed the prints and fixed # a Python3 bug about unicode not defined ################################################################# # Ideally, pycocotools wouldn't have hard-coded prints # so that we could avoid copy-pasting those two functions def create_index(self): """ :param self: :type self:""" # create index # print('creating index...') anns, cats, imgs = {}, {}, {} imgToAnns, catToImgs = defaultdict(list), defaultdict(list) if "annotations" in self.dataset: for ann in self.dataset["annotations"]: imgToAnns[ann["image_id"]].append(ann) anns[ann["id"]] = ann if "images" in self.dataset: for img in self.dataset["images"]: imgs[img["id"]] = img if "categories" in self.dataset: for cat in self.dataset["categories"]: cats[cat["id"]] = cat if "annotations" in self.dataset and "categories" in self.dataset: for ann in self.dataset["annotations"]: catToImgs[ann["category_id"]].append(ann["image_id"]) # print('index created!') # create class members self.anns = anns self.imgToAnns = imgToAnns self.catToImgs = catToImgs self.imgs = imgs self.cats = cats def load_results(self, resFile) -> COCO: """ Load result file and return a result api object. 
:param self: :type self: :param resFile: :type resFile: :return: res (obj) : result api object""" res = COCO() res.dataset["images"] = [img for img in self.dataset["images"]] # print('Loading and preparing results...') # tic = time.time() if isinstance(resFile, torch._six.string_classes): anns = json.load(open(resFile)) elif type(resFile) == numpy.ndarray: anns = self.loadNumpyAnnotations(resFile) else: anns = resFile assert type(anns) == list, "results in not an array of objects" annsImgIds = [ann["image_id"] for ann in anns] assert set(annsImgIds) == ( set(annsImgIds) & set(self.getImgIds()) ), "Results do not correspond to current coco set" if "caption" in anns[0]: imgIds = set([img["id"] for img in res.dataset["images"]]) & set( [ann["image_id"] for ann in anns] ) res.dataset["images"] = [ img for img in res.dataset["images"] if img["id"] in imgIds ] for id, ann in enumerate(anns): ann["id"] = id + 1 elif "bbox" in anns[0] and not anns[0]["bbox"] == []: res.dataset["categories"] = copy.deepcopy(self.dataset["categories"]) for id, ann in enumerate(anns): bb = ann["bbox"] x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]] if "segmentation" not in ann: ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]] ann["area"] = bb[2] * bb[3] ann["id"] = id + 1 ann["iscrowd"] = 0 elif "segmentation" in anns[0]: res.dataset["categories"] = copy.deepcopy(self.dataset["categories"]) for id, ann in enumerate(anns): # now only support compressed RLE format as segmentation results ann["area"] = pycocotools.mask.area(ann["segmentation"]) if "bbox" not in ann: ann["bbox"] = pycocotools.mask.toBbox(ann["segmentation"]) ann["id"] = id + 1 ann["iscrowd"] = 0 elif "keypoints" in anns[0]: res.dataset["categories"] = copy.deepcopy(self.dataset["categories"]) for id, ann in enumerate(anns): s = ann["keypoints"] x = s[0::3] y = s[1::3] x0, x1, y0, y1 = numpy.min(x), numpy.max(x), numpy.min(y), numpy.max(y) ann["area"] = (x1 - x0) * (y1 - y0) ann["id"] = id + 1 ann["bbox"] = [x0, y0, x1 - x0, y1 - y0] # print('DONE (t={:0.2f}s)'.format(time.time()- tic)) res.dataset["annotations"] = anns create_index(res) return res ################################################################# # end of straight copy from pycocotools, just removing the prints ################################################################# def coco_evaluation(dataset, predictions, output_dir: Path, iteration=None): """ :param dataset: :type dataset: :param predictions: :type predictions: :param output_dir: :type output_dir: :param iteration: :type iteration: :return: :rtype:""" coco_results = [] for i, prediction in enumerate(predictions): img_info = dataset.get_img_info(i) prediction = prediction.resize((img_info["width"], img_info["height"])).numpy() boxes, labels, scores = ( prediction["boxes"], prediction["labels"], prediction["scores"], ) image_id, annotation = dataset.get_annotation(i) class_mapper = dataset.contiguous_id_to_coco_id if labels.shape[0] == 0: continue boxes = boxes.tolist() labels = labels.tolist() scores = scores.tolist() coco_results.extend( [ { "image_id": image_id, "category_id": class_mapper[labels[k]], "bbox": [ box[0], box[1], box[2] - box[0], box[3] - box[1], ], # to xywh format "score": scores[k], } for k, box in enumerate(boxes) ] ) iou_type = "bbox" json_result_file = str(output_dir / f"{iou_type}.json") logger = logging.getLogger("SSD.inference") logger.info(f"Writing results to {json_result_file}...") with open(json_result_file, "w") as f: json.dump(coco_results, f) coco_gt = dataset.coco coco_dt 
= coco_gt.load_results(json_result_file) coco_eval = COCOeval(coco_gt, coco_dt, iou_type) coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() result_strings = [] keys = ["AP", "AP50", "AP75", "APs", "APm", "APl"] metrics = {} for i, key in enumerate(keys): metrics[key] = coco_eval.stats[i] logger.info(f"{key:<10}: {round(coco_eval.stats[i], 3)}") result_strings.append(f"{key:<10}: {round(coco_eval.stats[i], 3)}") if iteration is not None: result_path = str(output_dir / f"result_{iteration:07d}.txt") else: result_path = str( output_dir / f"result_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt" ) with open(result_path, "w") as f: f.write("\n".join(result_strings)) return dict(metrics=metrics) def get_iou_types(model) -> Sequence[IouType]: """ :param model: :type model: :return: :rtype:""" model_without_ddp = model if isinstance(model, torch.nn.parallel.DistributedDataParallel): model_without_ddp = model.module iou_types = [IouType.BoundingBox] if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN): iou_types.append(IouType.Segmentation) if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN): iou_types.append(IouType.Keypoints) return iou_types
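Both `prepare_for_coco_detection` and `coco_evaluation` above convert corner-format boxes to COCO's `[x, y, width, height]` layout; a minimal sketch of that conversion:

```python
def minmax_to_xywh(box):
    """Convert [x_min, y_min, x_max, y_max] to COCO [x, y, width, height]."""
    x_min, y_min, x_max, y_max = box
    return [x_min, y_min, x_max - x_min, y_max - y_min]


assert minmax_to_xywh([10, 20, 50, 80]) == [10, 20, 40, 60]
```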
PypiClean
/KailWeb-0.5.tar.gz/KailWeb-0.5/README.md
# Module and HTML Template Renderer Classes

This Python module contains two classes: `Start` and `Renderer`.

The `Start` class opens the default web browser and loads a specified HTML file. The constructor takes an optional `page` parameter, which specifies the path to the HTML file to load. The `open_page()` method opens the default web browser and loads the specified HTML file. If an error occurs while loading the page, an error message is printed to the console.

The `Renderer` class renders HTML templates with dynamic data. The constructor takes a required `template_file` parameter, which specifies the path to the HTML template file. The `render()` method takes a dictionary of key-value pairs representing the data to include in the template. The method reads the contents of the HTML template file, replaces placeholders in the template with values from the context, and returns the rendered HTML code as a string.

Example usage:

```python
from kailweb import Start, Renderer

# Open the default web browser and load index.html
start = Start('/index.html')
start.open_page()

# Render a template with dynamic data
renderer = Renderer('template.html')
context = {'name': 'John', 'age': 30}
html = renderer.render(context)
print(html)
```
PypiClean
/Flask-User-AWS-1.0.1.7.tar.gz/Flask-User-AWS-1.0.1.7/flask_user/email_adapters/sendgrid_email_adapter.py
from __future__ import print_function # Non-system imports are moved into the methods to make them an optional requirement from flask_user import current_app, ConfigError from flask_user.email_adapters import EmailAdapterInterface SENDGRID_IMPORT_ERROR_MESSAGE = 'The sendgrid package is missing. Install sendgrid with "pip install sendgrid".' class SendgridEmailAdapter(EmailAdapterInterface): """ Implements the EmailAdapter interface to send emails with SendGrid Web API v3 using sendgrid-python.""" def __init__(self, app): """Check config settings and setup SendGrid Web API v3. Args: app(Flask): The Flask application instance. """ super(SendgridEmailAdapter, self).__init__(app) sendgrid_api_key = app.config.get('SENDGRID_API_KEY') if not sendgrid_api_key: raise ConfigError( "The SENDGRID_API_KEY setting is missing. Set SENDGRID_API_KEY in your app config.") # Setup sendgrid-python try: from sendgrid import SendGridAPIClient self.sg = SendGridAPIClient(apikey=sendgrid_api_key) except ImportError: raise ConfigError(SENDGRID_IMPORT_ERROR_MESSAGE) def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name): """ Send email message via sendgrid-python. Args: recipient: Email address or tuple of (Name, Email-address). subject: Subject line. html_message: The message body in HTML. text_message: The message body in plain text. """ if not current_app.testing: # pragma: no cover try: # Prepare Sendgrid helper objects from sendgrid.helpers.mail import Email, Content, Substitution, Mail from_email = Email(sender_email, sender_name) to_email = Email(recipient) text_content = Content('text/plain', text_message) html_content = Content('text/html', html_message) # Prepare Sendgrid Mail object # Note: RFC 1341: text must be first, followed by html mail = Mail(from_email, subject, to_email, text_content) mail.add_content(html_content) # Send mail via the Sendgrid API response = self.sg.client.mail.send.post(request_body=mail.get()) print(response.status_code) print(response.body) print(response.headers) except ImportError: raise ConfigError(SENDGRID_IMPORT_ERROR_MESSAGE) except Exception as e: print(e) print(e.body) raise
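A minimal wiring sketch for `SendgridEmailAdapter`, assuming the sendgrid package version this adapter targets is installed; the key is a placeholder, and with `app.testing` set the send block is skipped, so no API call is made:

```python
from flask import Flask
from flask_user.email_adapters.sendgrid_email_adapter import SendgridEmailAdapter

app = Flask(__name__)
app.config['SENDGRID_API_KEY'] = 'SG.placeholder-key'  # placeholder, not a real key
app.testing = True  # in testing mode send_email_message skips the actual API call

adapter = SendgridEmailAdapter(app)
with app.app_context():
    adapter.send_email_message(
        recipient='user@example.com',
        subject='Please verify your email',
        html_message='<p>Click the link to verify.</p>',
        text_message='Click the link to verify.',
        sender_email='noreply@example.com',
        sender_name='Example App',
    )
```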
PypiClean
/MirrorX-6.0.13-py3-none-any.whl/bot/helper/ext_utils/fs_utils.py
import os
import pathlib
import shutil
import sys
import tarfile

import magic

from bot import aria2, LOGGER, DOWNLOAD_DIR, ARIA_CHILD_PROC, MEGA_CHILD_PROC
from .exceptions import NotSupportedExtractionArchive


def clean_download(path: str):
    if os.path.exists(path):
        LOGGER.info(f"Cleaning download: {path}")
        shutil.rmtree(path)


def start_cleanup():
    try:
        shutil.rmtree(DOWNLOAD_DIR)
    except FileNotFoundError:
        pass


def clean_all():
    aria2.remove_all(True)
    try:
        shutil.rmtree(DOWNLOAD_DIR)
    except FileNotFoundError:
        pass


def exit_clean_up(signal, frame):
    try:
        LOGGER.info("Please wait, while we clean up the downloads and stop running downloads")
        clean_all()
        ARIA_CHILD_PROC.kill()
        MEGA_CHILD_PROC.kill()
        sys.exit(0)
    except KeyboardInterrupt:
        LOGGER.warning("Force Exiting before the cleanup finishes!")
        ARIA_CHILD_PROC.kill()
        MEGA_CHILD_PROC.kill()
        sys.exit(1)


def get_path_size(path):
    if os.path.isfile(path):
        return os.path.getsize(path)
    total_size = 0
    for root, dirs, files in os.walk(path):
        for f in files:
            abs_path = os.path.join(root, f)
            total_size += os.path.getsize(abs_path)
    return total_size


def tar(org_path):
    tar_path = org_path + ".tar"
    path = pathlib.PurePath(org_path)
    LOGGER.info(f'Tar: orig_path: {org_path}, tar_path: {tar_path}')
    tar_file = tarfile.open(tar_path, "w")
    tar_file.add(org_path, arcname=path.name)
    tar_file.close()
    return tar_path


# Checked in order, so compound suffixes (".tar.bz2", ".tar.gz") must come
# before their shorter counterparts (".bz2", ".gz", ".tar").
ARCHIVE_EXTENSIONS = (
    ".tar.bz2", ".tar.gz", ".bz2", ".gz", ".tar", ".tbz2", ".tgz", ".zip",
    ".7z", ".Z", ".rar", ".iso", ".wim", ".cab", ".apm", ".arj", ".chm",
    ".cpio", ".cramfs", ".deb", ".dmg", ".fat", ".hfs", ".lzh", ".lzma",
    ".lzma2", ".mbr", ".msi", ".mslz", ".nsis", ".ntfs", ".rpm",
    ".squashfs", ".udf", ".vhd", ".xar",
)


def get_base_name(orig_path: str):
    # Strip only the trailing archive extension; str.replace() would also
    # mangle any other occurrence of the extension inside the path.
    for ext in ARCHIVE_EXTENSIONS:
        if orig_path.endswith(ext):
            return orig_path[:-len(ext)]
    raise NotSupportedExtractionArchive('File format not supported for extraction')


def get_mime_type(file_path):
    mime = magic.Magic(mime=True)
    mime_type = mime.from_file(file_path)
    mime_type = mime_type if mime_type else "text/plain"
    return mime_type
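Illustrative checks of `get_base_name`, showing how compound suffixes are matched before their shorter counterparts, and how unsupported formats are rejected:

```python
assert get_base_name('movie.tar.gz') == 'movie'   # ".tar.gz" wins over ".gz"
assert get_base_name('backup.tbz2') == 'backup'
try:
    get_base_name('notes.txt')
except NotSupportedExtractionArchive:
    pass  # unsupported formats raise instead of returning a guess
```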
PypiClean
/Inukshuk-0.1.2.tar.gz/Inukshuk-0.1.2/inukshuk/nodes/expression.py
from ..lexer import ( TOKEN_ID_WHITESPACE, TOKEN_ID_IDENTIFIER, TOKEN_ID_INTEGER, TOKEN_ID_FLOAT, TOKEN_ID_STRING_SINGLE_QUOTES, TOKEN_ID_STRING_DOUBLE_QUOTES, TOKEN_ID_SYMBOL) from .base import ( ParserError, ParserNode, MultiChildrenParserNode) from .const import ( StringNode, FloatNode, IntegerNode, ListNode, const_identifiers) from .filters import FilterNode from .operators import ( NotOperatorNode, binary_operator_nodes, misc_operator_nodes) class InvalidOperatorError(Exception): def __init__(self, value_node, op_name): super().__init__("Invalid operator: %s" % op_name) self.value_node = value_node self.op_name = op_name class ExpressionNode(ParserNode): def __init__(self, with_tail=True): super().__init__() self.child = None self.with_tail = with_tail def iterChildren(self): yield self.child def add(self, node): if self.child is None: self.child = node else: raise Exception("This node already has a child: %s" % self.child) def parse(self, parser): parser_read = parser.read parser_peek = parser.peek parser_expect = parser.expect parser_skip = parser.skip _, token_id, value = parser_peek() if token_id == TOKEN_ID_SYMBOL and value == '(': parser_read() parser_skip(TOKEN_ID_WHITESPACE) self.recurseInto(parser, ExpressionNode()) parser_skip(TOKEN_ID_WHITESPACE) parser_expect(TOKEN_ID_SYMBOL, ')') parser_skip(TOKEN_ID_WHITESPACE) if self.with_tail: self._parseTail(parser) return if token_id == TOKEN_ID_SYMBOL and value == '[': parser_read() parser_skip(TOKEN_ID_WHITESPACE) self.recurseInto(parser, ListNode()) parser_skip(TOKEN_ID_WHITESPACE) parser_expect(TOKEN_ID_SYMBOL, ']') parser_skip(TOKEN_ID_WHITESPACE) if self.with_tail: self._parseTail(parser) return if token_id == TOKEN_ID_IDENTIFIER and value == 'not': parser_read() parser_skip(TOKEN_ID_WHITESPACE) self.recurseInto(parser, NotOperatorNode()) parser_skip(TOKEN_ID_WHITESPACE) if self.with_tail: self._parseTail(parser) return self.child = self._parseValueWithFilter(parser) parser_skip(TOKEN_ID_WHITESPACE) if self.with_tail: self._parseTail(parser) def _parseValueWithFilter(self, parser): parser_read = parser.read parser_skip = parser.skip parser_isNext = parser.isNext # Start with the value, a context query or a string. value_node = self._parseValue(parser) # Now see if there are any filters. expr_root_node = value_node parser_skip(TOKEN_ID_WHITESPACE) while parser_isNext(TOKEN_ID_SYMBOL, '|'): parser_read() parser_skip(TOKEN_ID_WHITESPACE) flt_node = FilterNode() flt_node.add(expr_root_node) flt_node.parse(parser) expr_root_node = flt_node parser_skip(TOKEN_ID_WHITESPACE) return expr_root_node def _parseValue(self, parser): parser_readValue = parser.readValue parser_isNext = parser.isNext parser_peek = parser.peek # A context query, a string, a number, a list, a dictionary, # a tuple, or true/false. 
value_node = None if parser_isNext(TOKEN_ID_IDENTIFIER): _, _, idval = parser_peek() value_node = const_identifiers.get(idval) if value_node is None: value_node = ContextQueryNode() value_node.parse(parser) else: parser.read() elif (parser_isNext(TOKEN_ID_STRING_SINGLE_QUOTES) or parser_isNext(TOKEN_ID_STRING_DOUBLE_QUOTES)): value_node = StringNode(parser_readValue()) elif parser_isNext(TOKEN_ID_FLOAT): value_node = FloatNode(float(parser_readValue())) elif parser_isNext(TOKEN_ID_INTEGER): value_node = IntegerNode(int(parser_readValue())) elif parser_isNext(TOKEN_ID_SYMBOL, '-'): parser_readValue() if parser_isNext(TOKEN_ID_FLOAT): value_node = FloatNode(-float(parser_readValue())) elif parser_isNext(TOKEN_ID_INTEGER): value_node = IntegerNode(-int(parser_readValue())) else: line_num, _, value = parser.peek() raise ParserError(line_num, "unexpected token '%s'." % value) else: line_num, _, value = parser_peek() raise ParserError(line_num, "unexpected token '%s'." % value) return value_node def _parseTail(self, parser): # See if the expression continue... in which case we wrap # our current child into a bigger expression. parser_read = parser.read parser_peek = parser.peek parser_skip = parser.skip child_op_prec = getattr(self.child, '__op_precedence__', None) child_is_binary_op = getattr(self.child, '__op_type__', 0) == 2 line_num, token_id, value = parser_peek() if token_id == TOKEN_ID_IDENTIFIER: op_node_cls = misc_operator_nodes.get(value) if op_node_cls is not None: # Test or boolean operator. parser_read() parser_skip(TOKEN_ID_WHITESPACE) if op_node_cls.__op_type__ != 2: raise Exception("Unexpected unary operator: %s" % op_node_cls) as_tail = False if child_is_binary_op: as_tail = ( child_op_prec < op_node_cls.__op_precedence__) op_node = op_node_cls() if as_tail: op_node.add(self.child.child2) self.child.child2 = op_node op_node.parse(parser) else: op_node.add(self.child) self.child = op_node op_node.parse(parser) parser_skip(TOKEN_ID_WHITESPACE) self._parseTail(parser) return elif token_id == TOKEN_ID_SYMBOL: op_name = None # Binary operators are 1 or 2 characters... see if any operator # matches this first character. for k in binary_operator_nodes.keys(): if k[0] == value: op_name = value break if op_name: # Yep, it matched. Now see if there's a 2nd character too. parser_read() _, token_id, value = parser_peek() if token_id == TOKEN_ID_SYMBOL: # Try matching a 2-character operator. If nothing matches, # go back to the one character operator. # # This can happen for instance with: # {{ a /(2 + b) }} # ^^ # 2-chars! # if (op_name + value) in binary_operator_nodes: op_name += value parser_read() op_node_cls = binary_operator_nodes.get(op_name) if op_node_cls is not None: # Binary operator. parser_skip(TOKEN_ID_WHITESPACE) as_tail = False if child_is_binary_op: as_tail = ( child_op_prec < op_node_cls.__op_precedence__) op_node = op_node_cls() if as_tail: op_node.add(self.child.child2) self.child.child2 = op_node op_node.parse(parser) else: op_node.add(self.child) self.child = op_node op_node.parse(parser) parser_skip(TOKEN_ID_WHITESPACE) self._parseTail(parser) return else: # Raise an exception because at this point we can't # give back what we consumed. 
        raise InvalidOperatorError(self.child, op_name)

    def render(self, ctx, data, out):
        return self.child.render(ctx, data, out)

    def compile(self, ctx, out):
        self.child.compile(ctx, out)


class ExpressionWrapperNode(ExpressionNode):
    def render(self, ctx, data, out):
        # This is where we make sure things are converted to strings for
        # rendering the output.
        val = self.child.render(ctx, data, out)
        out(ctx.engine.escape(val))

    def compile(self, ctx, out):
        self.child.compile(ctx, out)
        val = ctx.popvar()
        out.indent().write('out_write_escaped(%s)\n' % val)


class ContextQueryNode(MultiChildrenParserNode):
    TYPE_PROPERTY = 0
    TYPE_ARRAY_ITEM = 1
    TYPE_DICT_ITEM = 2
    TYPE_FUNC_CALL = 3

    def __init__(self, name=None):
        super().__init__()
        self.name = name
        self.query_type = ContextQueryNode.TYPE_PROPERTY
        self.array_item_query = None
        self.is_head = True
        self.has_tail = False
        self.force_cache_query = False

    def parse(self, parser):
        parser_expect = parser.expect
        parser_isNext = parser.isNext
        parser_skip = parser.skip
        parser_read = parser.read

        self.name = parser_expect(TOKEN_ID_IDENTIFIER)

        # See if we have either a dictionary access or a function call.
        if parser_isNext(TOKEN_ID_SYMBOL, '['):
            parser_read()
            parser_skip(TOKEN_ID_WHITESPACE)
            if parser_isNext(TOKEN_ID_INTEGER):
                # Index or full slice or first-half-slice (array[i:]).
                is_slice = False
                index1 = int(parser_read()[2])
                index2 = None
                parser_skip(TOKEN_ID_WHITESPACE)
                if parser_isNext(TOKEN_ID_SYMBOL, ':'):
                    parser_read()
                    parser_skip(TOKEN_ID_WHITESPACE)
                    is_slice = True
                    if parser_isNext(TOKEN_ID_INTEGER):
                        index2 = int(parser_read()[2])
                self.array_item_query = (is_slice, index1, index2)
                self.query_type = ContextQueryNode.TYPE_ARRAY_ITEM
            elif parser_isNext(TOKEN_ID_SYMBOL, ':'):
                # Second-half-slice (array[:i]).
                parser_read()
                parser_skip(TOKEN_ID_WHITESPACE)
                index2 = parser_expect(TOKEN_ID_INTEGER)
                self.array_item_query = (True, None, int(index2))
                self.query_type = ContextQueryNode.TYPE_ARRAY_ITEM
            else:
                self.recurseInto(parser, ExpressionNode())
                self.query_type = ContextQueryNode.TYPE_DICT_ITEM
            parser_skip(TOKEN_ID_WHITESPACE)
            parser_expect(TOKEN_ID_SYMBOL, ']')
        elif parser_isNext(TOKEN_ID_SYMBOL, '('):
            self.query_type = ContextQueryNode.TYPE_FUNC_CALL
            parser_read()
            parser_skip(TOKEN_ID_WHITESPACE)
            needs_comma = False
            while not parser_isNext(TOKEN_ID_SYMBOL, ')'):
                if needs_comma:
                    parser_skip(TOKEN_ID_WHITESPACE)
                    parser_expect(TOKEN_ID_SYMBOL, ',')
                    parser_skip(TOKEN_ID_WHITESPACE)
                self.recurseInto(parser, ExpressionNode())
                needs_comma = True
            parser_expect(TOKEN_ID_SYMBOL, ')')

        # See if the context query continues.
        if parser_isNext(TOKEN_ID_SYMBOL, '.'):
            parser_read()
            self.has_tail = True
            tail = ContextQueryNode()
            tail.is_head = False
            self.recurseInto(parser, tail)

    def render(self, ctx, data, out):
        children = self.children
        if self.has_tail:
            children = self.children[:-1]

        if self.is_head:
            val = ctx.queryRoot(data, self.name)
        else:
            val = ctx.query(data, self.name)

        if self.query_type == ContextQueryNode.TYPE_ARRAY_ITEM:
            is_slice, index1, index2 = self.array_item_query
            if is_slice:
                valid_index1 = index1 is not None
                valid_index2 = index2 is not None
                if valid_index1 and valid_index2:
                    val = val[index1:index2]
                elif valid_index1:
                    val = val[index1:]
                elif valid_index2:
                    val = val[:index2]
                else:
                    raise Exception("Got slicing notation without indices!")
            else:
                val = val[index1]
        elif self.query_type == ContextQueryNode.TYPE_DICT_ITEM:
            key = children[0].render(ctx, data, out)
            val = val[key]
        elif self.query_type == ContextQueryNode.TYPE_FUNC_CALL:
            params = list(self._renderTheseChildren(children, ctx, data, out))
            if getattr(val, 'needs_context', False):
                val = val(ctx, data, out, *params)
            else:
                val = val(*params)
        elif self.query_type != ContextQueryNode.TYPE_PROPERTY:
            raise Exception("Unexpected query type: %s" % self.query_type)

        if self.has_tail:
            return self.children[-1].render(ctx, val, out)
        else:
            return val

    def _renderTheseChildren(self, children, ctx, data, out):
        for c in children:
            yield c.render(ctx, data, out)

    def compile(self, ctx, out):
        children = self.children
        if self.has_tail:
            children = self.children[:-1]

        data_name = 'data'
        if not self.is_head:
            data_name = ctx.popvar()

        if self.query_type == ContextQueryNode.TYPE_PROPERTY:
            val = self._cachedQueryVar(ctx, self.name)
            if val is None:
                val = self._newQuery(data_name, self.name)
                if self.force_cache_query:
                    cached_val = ctx.varname('cached_query')
                    out.indent().write('%s = %s\n' % (cached_val, val))
                    val = cached_val
                    ctx.cacheQuery(self.name, val)
            ctx.pushvar(val)
        elif self.query_type == ContextQueryNode.TYPE_ARRAY_ITEM:
            val = self._cachedQueryVar(ctx, self.name)
            if val is None:
                val = self._newQuery(data_name, self.name)
                if self.force_cache_query:
                    cached_val = ctx.varname('cached_query')
                    out.indent().write('%s = %s\n' % (cached_val, val))
                    val = cached_val
                    ctx.cacheQuery(self.name, val)
            is_slice, index1, index2 = self.array_item_query
            if is_slice:
                valid_index1 = index1 is not None
                valid_index2 = index2 is not None
                if valid_index1 and valid_index2:
                    expr = '%s[%s:%s]' % (val, index1, index2)
                elif valid_index1:
                    expr = '%s[%s:]' % (val, index1)
                elif valid_index2:
                    expr = '%s[:%s]' % (val, index2)
                else:
                    raise Exception("Got slicing notation without indices!")
            else:
                expr = '%s[%s]' % (val, index1)
            ctx.pushvar(expr)
        elif self.query_type == ContextQueryNode.TYPE_DICT_ITEM:
            dname = self._cachedQueryVar(ctx, self.name)
            if dname is None:
                dname = self._newQuery(data_name, self.name)
                if self.force_cache_query:
                    cached_dname = ctx.varname('cached_query')
                    out.indent().write('%s = %s\n' % (cached_dname, dname))
                    dname = cached_dname
                    ctx.cacheQuery(self.name, dname)
            children[0].compile(ctx, out)
            expr = '%s[%s]' % (dname, ctx.popvar())
            ctx.pushvar(expr)
        elif self.query_type == ContextQueryNode.TYPE_FUNC_CALL:
            fname = self._cachedQueryVar(ctx, self.name)
            if fname is None:
                fname = self._newQuery(data_name, self.name)
                if self.force_cache_query:
                    cached_fname = ctx.varname('cached_query')
                    out.indent().write('%s = %s\n' % (cached_fname, fname))
                    fname = cached_fname
                    ctx.cacheQuery(self.name, fname)
            param_names = []
            for c in children:
                c.compile(ctx, out)
                param_names.append(ctx.popvar())
            if param_names:
                val = ('ctx_invoke(data, out_write, %s, %s)' %
                       (fname, ', '.join(param_names)))
            else:
                val = 'ctx_invoke(data, out_write, %s)' % fname
            ctx.pushvar(val)
        else:
            raise Exception("Unexpected query type: %s" % self.query_type)

        if self.has_tail:
            self.children[-1].compile(ctx, out)

    def _cachedQueryVar(self, ctx, query):
        if self.is_head:
            return ctx.getCachedQuery(query)
        return None

    def _newQuery(self, dataname, query):
        if self.is_head:
            return 'ctx_queryRoot(%s, %s)' % (dataname, repr(query))
        else:
            return 'ctx_query(%s, %s)' % (dataname, repr(query))
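

# Illustration only (not part of the original module): the parse() method above
# encodes `array[i]`, `array[i:j]`, `array[i:]` and `array[:j]` as an
# (is_slice, index1, index2) tuple, and render()/compile() turn that tuple into
# ordinary Python indexing. This self-contained sketch mirrors that mapping.
if __name__ == "__main__":
    def apply_array_item_query(val, query):
        # Same branch structure as the TYPE_ARRAY_ITEM case in render().
        is_slice, index1, index2 = query
        if not is_slice:
            return val[index1]
        if index1 is not None and index2 is not None:
            return val[index1:index2]
        if index1 is not None:
            return val[index1:]
        return val[:index2]

    data = list(range(5))
    assert apply_array_item_query(data, (False, 2, None)) == 2        # items[2]
    assert apply_array_item_query(data, (True, 1, 3)) == [1, 2]       # items[1:3]
    assert apply_array_item_query(data, (True, 1, None)) == [1, 2, 3, 4]  # items[1:]
    assert apply_array_item_query(data, (True, None, 3)) == [0, 1, 2]     # items[:3]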
PypiClean
/Discohook-1.0.0.tar.gz/Discohook-1.0.0/discohook/client.py
import logging
import json
import time
import datetime

import requests

logger = logging.getLogger(__name__)

__all__ = ["Discohook", "DiscohookEmbed", "DiscohookException"]


class Discohook:
    """
    Webhook for Discord
    """

    def __init__(self, url, **kwargs):
        """
        Init Discohook

        - param url : Discohook webhook url
        - type url : str, list
        - keyword content : the message contents
        - keyword username : override the default username of the webhook
        - keyword avatar_url : override the default avatar of the webhook
        - keyword tts : true if this is a TTS message
        - keyword file : file contents
        - keyword filename : file name
        - keyword embeds : list of embedded rich content
        - keyword allowed_mentions : allowed mentions for the message
        - keyword proxies : dict of proxies
        - keyword timeout : (optional) amount of seconds to wait for a response from Discord
        """
        self.url = url
        self.content = kwargs.get("content")
        self.username = kwargs.get("username")
        self.avatar_url = kwargs.get("avatar_url")
        self.tts = kwargs.get("tts", False)
        self.files = kwargs.get("files", dict())
        self.embeds = kwargs.get("embeds", [])
        self.proxies = kwargs.get("proxies")
        self.allowed_mentions = kwargs.get("allowed_mentions")
        self.timeout = kwargs.get("timeout")
        self.rate_limit_retry = kwargs.get("rate_limit_retry")

    def add_file(self, file, filename):
        """
        Adds a file to the webhook

        - param file : `file content`
        - param filename : `filename`
        """
        self.files["_{}".format(filename)] = (filename, file)

    def add_embed(self, embed):
        """
        Adds an embedded rich content

        - param embed : embed object or dict
        """
        self.embeds.append(embed.__dict__ if isinstance(embed, DiscohookEmbed) else embed)

    def remove_embed(self, index):
        """
        Removes embedded rich content from `self.embeds`

        - param index : index of embed in `self.embeds`
        """
        self.embeds.pop(index)

    def remove_file(self, filename):
        """
        Removes file from `self.files` using specified `filename` if it exists

        - param filename : `filename`
        """
        filename = "_{}".format(filename)
        if filename in self.files:
            del self.files[filename]

    def get_embeds(self):
        """
        Gets all `self.embeds` as list

        - return : `self.embeds`
        """
        return self.embeds

    def set_proxies(self, proxies):
        """
        Sets Proxies

        - param proxies : `dict of proxies`
        - type proxies : `dict`
        """
        self.proxies = proxies

    def set_content(self, content):
        """
        Sets Content

        - param content : `content string`
        - type content : `string`
        """
        self.content = content

    @property
    def json(self):
        """
        Convert webhook data to json

        - return : webhook data as json
        """
        data = dict()
        embeds = self.embeds
        self.embeds = list()
        # convert DiscohookEmbed to dict
        for embed in embeds:
            self.add_embed(embed)
        for key, value in self.__dict__.items():
            if value and key not in ["url", "files", "filename"]:
                data[key] = value
        embeds_empty = (
            all(not embed for embed in data["embeds"]) if "embeds" in data else True
        )
        if embeds_empty and "content" not in data and bool(self.files) is False:
            logger.error("Webhook message is empty! set content or embed data")
        return data

    def remove_embeds(self):
        """
        Sets `self.embeds` to empty `list`.
        """
        self.embeds = []

    def remove_files(self):
        """
        Sets `self.files` to empty `dict`.
        """
        self.files = {}

    def api_post_request(self, url):
        if bool(self.files) is False:
            response = requests.post(url, json=self.json, proxies=self.proxies,
                                     params={'wait': True}, timeout=self.timeout)
        else:
            self.files["payload_json"] = (None, json.dumps(self.json))
            response = requests.post(url, files=self.files, proxies=self.proxies,
                                     timeout=self.timeout)
        return response

    def execute(self, remove_embeds=False, remove_files=False):
        """
        Executes the Webhook

        - param remove_embeds : if set to True, calls `self.remove_embeds()`
          to empty `self.embeds` after the webhook is executed
        - param remove_files : if set to True, calls `self.remove_files()`
          to empty `self.files` after the webhook is executed
        - return : Webhook response
        """
        webhook_urls = self.url if isinstance(self.url, list) else [self.url]
        urls_len = len(webhook_urls)
        responses = []
        for i, url in enumerate(webhook_urls):
            response = self.api_post_request(url)
            if response.status_code in [200, 204]:
                logger.debug("[{index}/{length}] Webhook executed".format(
                    index=i + 1, length=urls_len))
            elif response.status_code == 429 and self.rate_limit_retry:
                while response.status_code == 429:
                    errors = json.loads(response.content.decode('utf-8'))
                    wh_sleep = (int(errors['retry_after']) / 1000) + 0.15
                    time.sleep(wh_sleep)
                    logger.error(
                        "Webhook rate limited: sleeping for {wh_sleep} "
                        "seconds...".format(wh_sleep=wh_sleep))
                    response = self.api_post_request(url)
                    if response.status_code in [200, 204]:
                        logger.debug("[{index}/{length}] Webhook executed".format(
                            index=i + 1, length=urls_len))
                        break
            else:
                logger.error(
                    "[{index}/{length}] Webhook status code {status_code}: {content}".format(
                        index=i + 1,
                        length=urls_len,
                        status_code=response.status_code,
                        content=response.content.decode("utf-8"),
                    )
                )
            responses.append(response)
        if remove_embeds:
            self.remove_embeds()
        if remove_files:
            self.remove_files()
        return responses[0] if len(responses) == 1 else responses

    def edit(self, sent_webhook):
        """
        Edits the webhook passed as a response

        - param sent_webhook : webhook.execute() response
        - return : Another webhook response
        """
        sent_webhook = sent_webhook if isinstance(sent_webhook, list) else [sent_webhook]
        webhook_len = len(sent_webhook)
        responses = []
        for i, webhook in enumerate(sent_webhook):
            url = webhook.url.split('?')[0]  # removes any query params
            previous_sent_message_id = json.loads(webhook.content.decode('utf-8'))['id']
            if bool(self.files) is False:
                response = requests.patch(url + '/messages/' + str(previous_sent_message_id),
                                          json=self.json, proxies=self.proxies,
                                          params={'wait': True}, timeout=self.timeout)
            else:
                self.files["payload_json"] = (None, json.dumps(self.json))
                response = requests.patch(url + '/messages/' + str(previous_sent_message_id),
                                          files=self.files, proxies=self.proxies,
                                          timeout=self.timeout)
            if response.status_code in [200, 204]:
                logger.debug("[{index}/{length}] Webhook edited".format(
                    index=i + 1, length=webhook_len))
            else:
                logger.error(
                    "[{index}/{length}] Webhook status code {status_code}: {content}".format(
                        index=i + 1,
                        length=webhook_len,
                        status_code=response.status_code,
                        content=response.content.decode("utf-8"),
                    )
                )
            responses.append(response)
        return responses[0] if len(responses) == 1 else responses

    def delete(self, sent_webhook):
        """
        Deletes the webhook passed as a response

        - param sent_webhook : webhook.execute() response
        - return : Response
        """
        sent_webhook = sent_webhook if isinstance(sent_webhook, list) else [sent_webhook]
        webhook_len = len(sent_webhook)
        responses = []
        for i, webhook in enumerate(sent_webhook):
            url = webhook.url.split('?')[0]  # removes any query params
            previous_sent_message_id = json.loads(webhook.content.decode('utf-8'))['id']
            response = requests.delete(url + '/messages/' + str(previous_sent_message_id),
                                       proxies=self.proxies, timeout=self.timeout)
            if response.status_code in [200, 204]:
                logger.debug("[{index}/{length}] Webhook deleted".format(
                    index=i + 1, length=webhook_len))
            else:
                logger.error(
                    "[{index}/{length}] Webhook status code {status_code}: {content}".format(
                        index=i + 1,
                        length=webhook_len,
                        status_code=response.status_code,
                        content=response.content.decode("utf-8"),
                    )
                )
            responses.append(response)
        return responses[0] if len(responses) == 1 else responses


class DiscohookEmbed:
    """
    Discord Embed
    """

    def __init__(self, **kwargs):
        """
        Init Discord Embed

        - keyword title : title of embed
        - keyword description : description of embed
        - keyword url : url of embed
        - keyword timestamp : timestamp of embed content
        - keyword color : color code of the embed as int
        - keyword hex_color : color code of the embed as a hex string
        - keyword footer : footer information
        - keyword image : image information
        - keyword thumbnail : thumbnail information
        - keyword video : video information
        - keyword provider : provider information
        - keyword author : author information
        - keyword fields : fields information
        """
        self.title = kwargs.get("title")
        self.description = kwargs.get("description")
        self.url = kwargs.get("url")
        self.timestamp = kwargs.get("timestamp")
        self.color = kwargs.get("color")
        if self.color:
            self.set_color(self.color)
        self.hex_color = kwargs.get("hex_color")
        self.footer = kwargs.get("footer")
        self.image = kwargs.get("image")
        self.thumbnail = kwargs.get("thumbnail")
        self.video = kwargs.get("video")
        self.provider = kwargs.get("provider")
        self.author = kwargs.get("author")
        self.fields = kwargs.get("fields", [])

    def set_title(self, title):
        """
        Set title of embed

        - param title : title of embed
        """
        self.title = title

    def set_description(self, description):
        """
        Set description of embed

        - param description : description of embed
        """
        self.description = description

    def set_url(self, url):
        """
        Set url of embed

        - param url : url of embed
        """
        self.url = url

    def set_timestamp(self, timestamp=None):
        """
        Set timestamp of embed content

        - param timestamp : (optional) timestamp of embed content
        """
        if timestamp is None:
            timestamp = time.time()
        self.timestamp = str(datetime.datetime.utcfromtimestamp(timestamp))

    def set_color(self, color):
        """
        Set color code of the embed as decimal(int) or hex(string)

        - param color : color code of the embed as decimal(int) or hex(string)
        """
        if isinstance(color, str):
            self.color = int(color, 16)
        else:
            self.color = color

    def set_footer(self, **kwargs):
        """
        Set footer information of embed

        - keyword text : footer text
        - keyword icon_url : url of footer icon (only supports http(s) and attachments)
        - keyword proxy_icon_url : a proxied url of footer icon
        """
        self.footer = {
            "text": kwargs.get("text"),
            "icon_url": kwargs.get("icon_url"),
            "proxy_icon_url": kwargs.get("proxy_icon_url"),
        }

    def set_image(self, **kwargs):
        """
        Set image of embed

        - keyword url : source url of image (only supports http(s) and attachments)
        - keyword proxy_url : a proxied url of the image
        - keyword height : height of image
        - keyword width : width of image
        """
        self.image = {
            "url": kwargs.get("url"),
            "proxy_url": kwargs.get("proxy_url"),
            "height": kwargs.get("height"),
            "width": kwargs.get("width"),
        }

    def set_thumbnail(self, **kwargs):
        """
        Set thumbnail of embed

        - keyword url : source url of thumbnail (only supports http(s) and attachments)
        - keyword proxy_url : a proxied url of the thumbnail
        - keyword height : height of thumbnail
        - keyword width : width of thumbnail
        """
        self.thumbnail = {
            "url": kwargs.get("url"),
            "proxy_url": kwargs.get("proxy_url"),
            "height": kwargs.get("height"),
            "width": kwargs.get("width"),
        }

    def set_video(self, **kwargs):
        """
        Set video of embed

        - keyword url : source url of video
        - keyword height : height of video
        - keyword width : width of video
        """
        self.video = {
            "url": kwargs.get("url"),
            "height": kwargs.get("height"),
            "width": kwargs.get("width"),
        }

    def set_provider(self, **kwargs):
        """
        Set provider of embed

        - keyword name : name of provider
        - keyword url : url of provider
        """
        self.provider = {
            "name": kwargs.get("name"),
            "url": kwargs.get("url"),
        }

    def set_author(self, **kwargs):
        """
        Set author of embed

        - keyword name : name of author
        - keyword url : url of author
        - keyword icon_url : url of author icon (only supports http(s) and attachments)
        - keyword proxy_icon_url : a proxied url of author icon
        """
        self.author = {
            "name": kwargs.get("name"),
            "url": kwargs.get("url"),
            "icon_url": kwargs.get("icon_url"),
            "proxy_icon_url": kwargs.get("proxy_icon_url"),
        }

    def add_embed_field(self, **kwargs):
        """
        Set field of embed

        - keyword name : name of the field
        - keyword value : value of the field
        - keyword inline : (optional) whether or not this field should display inline
        """
        self.fields.append(
            {
                "name": kwargs.get("name"),
                "value": kwargs.get("value"),
                "inline": kwargs.get("inline", True),
            }
        )

    def del_embed_field(self, index):
        """
        Remove field from `self.fields`

        - param index : index of field in `self.fields`
        """
        self.fields.pop(index)

    def get_embed_fields(self):
        """
        Get all `self.fields` as list

        - return : `self.fields`
        """
        return self.fields
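

# Usage sketch based only on the classes above; the webhook URL is a
# placeholder and must be replaced with a real Discord webhook URL.
if __name__ == "__main__":
    webhook = Discohook(
        url="https://discord.com/api/webhooks/<id>/<token>",  # placeholder
        content="Deploy finished",
        rate_limit_retry=True,
    )
    embed = DiscohookEmbed(title="Build #42", description="All tests passed",
                           hex_color="03b2f8")
    embed.set_footer(text="CI bot")
    embed.add_embed_field(name="Duration", value="3m 12s")
    webhook.add_embed(embed)
    response = webhook.execute()  # POSTs with {'wait': True} and returns the response
    # The response can later be passed back to edit() or delete(), e.g.:
    # webhook.delete(response)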
PypiClean
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/axelrod/moran.py
from collections import Counter
from typing import Callable, List, Optional, Set, Tuple

import matplotlib.pyplot as plt
import numpy as np

from axelrod import DEFAULT_TURNS, EvolvablePlayer, Game, Player
from axelrod.deterministic_cache import DeterministicCache
from axelrod.graph import Graph, complete_graph
from axelrod.match import Match
from axelrod.random_ import BulkRandomGenerator, RandomGenerator


class MoranProcess(object):
    def __init__(
        self,
        players: List[Player],
        turns: int = DEFAULT_TURNS,
        prob_end: float = None,
        noise: float = 0,
        game: Game = None,
        deterministic_cache: DeterministicCache = None,
        mutation_rate: float = 0.0,
        mode: str = "bd",
        interaction_graph: Graph = None,
        reproduction_graph: Graph = None,
        fitness_transformation: Callable = None,
        mutation_method="transition",
        stop_on_fixation=True,
        seed=None,
        match_class=Match,
    ) -> None:
        """
        An agent based Moran process class. In each round, each player plays a
        Match with each other player. Players are assigned a fitness score by
        their total score from all matches in the round. A player is chosen to
        reproduce proportionally to fitness, possibly mutated, and is cloned.
        The clone replaces a randomly chosen player.

        If the mutation_rate is 0, the population will eventually fixate on
        exactly one player type. In this case a StopIteration exception is
        raised and the play stops. If the mutation_rate is not zero, then the
        process will iterate indefinitely, so mp.play() will never exit, and
        you should use the class as an iterator instead.

        When a player mutates it chooses a random player type from the initial
        population. This is not the only possible mutation method, but it
        emulates the common method in the literature.

        It is possible to pass interaction graphs and reproduction graphs to
        the Moran process. In this case, in each round, each player plays a
        Match with each neighboring player according to the interaction graph.
        Players are assigned a fitness score by their total score from all
        matches in the round. A player is chosen to reproduce proportionally
        to fitness, possibly mutated, and is cloned. The clone replaces a
        randomly chosen neighboring player according to the reproduction
        graph.

        Parameters
        ----------
        players
        turns:
            The number of turns in each pairwise interaction
        prob_end:
            The probability of a given turn ending a match
        noise:
            The background noise, if any. Randomly flips plays with
            probability `noise`.
        game: axelrod.Game
            The game object used to score matches.
        deterministic_cache:
            An optional prebuilt deterministic cache
        mutation_rate:
            The rate of mutation. Replicating players are mutated with
            probability `mutation_rate`
        mode:
            Birth-Death (bd) or Death-Birth (db)
        interaction_graph: Axelrod.graph.Graph
            The graph in which the replicators are arranged
        reproduction_graph: Axelrod.graph.Graph
            The reproduction graph, set equal to the interaction graph
            if not given
        fitness_transformation:
            A function mapping a score to a (non-negative) float
        mutation_method:
            A string indicating if the mutation method should be between
            original types ("transition") or based on the player's mutation
            method, if present ("atomic").
        stop_on_fixation:
            A bool indicating if the process should stop on fixation
        seed: int
            A random seed for reproducibility
        """
        m = mutation_method.lower()
        if m in ["atomic", "transition"]:
            self.mutation_method = m
        else:
            raise ValueError(
                "Invalid mutation method {}".format(mutation_method)
            )
        assert (mutation_rate >= 0) and (mutation_rate <= 1)
        assert (noise >= 0) and (noise <= 1)
        mode = mode.lower()
        assert mode in ["bd", "db"]
        self.mode = mode
        if deterministic_cache is not None:
            self.deterministic_cache = deterministic_cache
        else:
            self.deterministic_cache = DeterministicCache()
        self.turns = turns
        self.match_class = match_class
        self.prob_end = prob_end
        self.game = game
        self.noise = noise
        self.initial_players = players  # save initial population
        self.players = []  # type: List
        self.populations = []  # type: List
        self.score_history = []  # type: List
        self.winning_strategy_name = None  # type: Optional[str]
        self.mutation_rate = mutation_rate
        self.stop_on_fixation = stop_on_fixation
        self._random = RandomGenerator(seed=seed)
        self._bulk_random = BulkRandomGenerator(self._random.random_seed_int())
        self.set_players()
        # Build the set of mutation targets
        # Determine the number of unique types (players)
        keys = set([str(p) for p in players])
        # Create a dictionary mapping each type to a set of representatives
        # of the other types
        d = dict()
        for p in players:
            d[str(p)] = p
        mutation_targets = dict()
        for key in sorted(keys):
            mutation_targets[key] = [
                v for (k, v) in sorted(d.items()) if k != key
            ]
        self.mutation_targets = mutation_targets

        if interaction_graph is None:
            interaction_graph = complete_graph(len(players), loops=False)
        if reproduction_graph is None:
            reproduction_graph = Graph(
                interaction_graph.edges, directed=interaction_graph.directed
            )
            reproduction_graph.add_loops()
        # Check equal vertices
        v1 = interaction_graph.vertices
        v2 = reproduction_graph.vertices
        assert list(v1) == list(v2)
        self.interaction_graph = interaction_graph
        self.reproduction_graph = reproduction_graph
        self.fitness_transformation = fitness_transformation
        # Map players to graph vertices
        self.locations = sorted(interaction_graph.vertices)
        self.index = dict(
            zip(sorted(interaction_graph.vertices), range(len(players)))
        )
        self.fixated = self.fixation_check()

    def set_players(self) -> None:
        """Copy the initial players into the first population, setting seeds as needed."""
        self.players = []
        for player in self.initial_players:
            if (self.mutation_method == "atomic") and issubclass(
                player.__class__, EvolvablePlayer
            ):
                # For reproducibility, we generate random seeds for evolvable players.
                seed = next(self._bulk_random)
                new_player = player.create_new(seed=seed)
                self.players.append(new_player)
            else:
                player.reset()
                self.players.append(player)
        self.populations = [self.population_distribution()]

    def fitness_proportionate_selection(
        self, scores: List, fitness_transformation: Callable = None
    ) -> int:
        """Randomly selects an individual proportionally to score.

        Parameters
        ----------
        scores: Any sequence of real numbers
        fitness_transformation: A function mapping a score to a (non-negative) float

        Returns
        -------
        An index of the above list selected at random proportionally to the
        list element divided by the total.
        """
        if fitness_transformation is None:
            csums = np.cumsum(scores)
        else:
            csums = np.cumsum([fitness_transformation(s) for s in scores])
        total = csums[-1]
        r = self._random.random() * total

        for i, x in enumerate(csums):
            if x >= r:
                break
        return i

    def mutate(self, index: int) -> Player:
        """Mutate the player at index.

        Parameters
        ----------
        index:
            The index of the player to be mutated
        """
        if self.mutation_method == "atomic":
            if not issubclass(self.players[index].__class__, EvolvablePlayer):
                raise TypeError(
                    "Player is not evolvable. Use a subclass of EvolvablePlayer."
                )
            return self.players[index].mutate()

        # Assuming mutation_method == "transition"
        if self.mutation_rate > 0:
            # Choose another strategy at random from the initial population
            r = self._random.random()
            if r < self.mutation_rate:
                s = str(self.players[index])
                j = self._random.randrange(0, len(self.mutation_targets[s]))
                p = self.mutation_targets[s][j]
                return p.clone()
        # Just clone the player
        return self.players[index].clone()

    def death(self, index: int = None) -> int:
        """
        Selects the player to be removed.

        Note that in the birth-death case, the player that is reproducing may
        also be replaced. However in the death-birth case, this player will be
        excluded from the choices.

        Parameters
        ----------
        index:
            The index of the player to be removed
        """
        if index is None:
            # Select a player to be replaced globally
            i = self._random.randrange(0, len(self.players))
            # Record internally for use in _matchup_indices
            self.dead = i
        else:
            # Select locally
            # index is not None in this case
            vertex = self._random.choice(
                sorted(
                    self.reproduction_graph.out_vertices(self.locations[index])
                )
            )
            i = self.index[vertex]
        return i

    def birth(self, index: int = None) -> int:
        """The birth event.

        Parameters
        ----------
        index:
            The index of the player to be copied
        """
        # Compute necessary fitnesses.
        scores = self.score_all()
        if index is not None:
            # Death has already occurred, so remove the dead player from the
            # possible choices
            scores.pop(index)
            # Make sure to get the correct index post-pop
            j = self.fitness_proportionate_selection(
                scores, fitness_transformation=self.fitness_transformation
            )
            if j >= index:
                j += 1
        else:
            j = self.fitness_proportionate_selection(
                scores, fitness_transformation=self.fitness_transformation
            )
        return j

    def fixation_check(self) -> bool:
        """
        Checks if the population is all of a single type

        Returns
        -------
        Boolean:
            True if fixation has occurred (population all of a single type)
        """
        classes = set(str(p) for p in self.players)
        self.fixated = False
        if len(classes) == 1:
            # Set the winning strategy name variable
            self.winning_strategy_name = str(self.players[0])
            self.fixated = True
        return self.fixated

    def __next__(self) -> object:
        """
        Iterate the population:

        - play the round's matches
        - chooses a player proportionally to fitness (total score) to reproduce
        - mutate, if appropriate
        - choose a player to be replaced
        - update the population

        Returns
        -------
        MoranProcess:
            Returns itself with a new population
        """
        # Check the exit condition, that all players are of the same type.
        if self.stop_on_fixation and self.fixation_check():
            raise StopIteration
        if self.mode == "bd":
            # Birth then death
            j = self.birth()
            i = self.death(j)
        elif self.mode == "db":
            # Death then birth
            i = self.death()
            self.players[i] = None
            j = self.birth(i)
        # Mutate and/or replace player i with clone of player j
        self.players[i] = self.mutate(j)
        # Record population.
        self.populations.append(self.population_distribution())
        return self

    def _matchup_indices(self) -> Set[Tuple[int, int]]:
        """
        Generate the matchup pairs.

        Returns
        -------
        indices:
            A set of 2 tuples of matchup pairs: the collection of all players
            who play each other.
        """
        indices = set()  # type: Set
        # For death-birth we only want the neighbors of the dead node
        # The other calculations are unnecessary
        if self.mode == "db":
            source = self.index[self.dead]
            self.dead = None
            sources = sorted(self.interaction_graph.out_vertices(source))
        else:
            # birth-death is global
            sources = sorted(self.locations)
        for i, source in enumerate(sources):
            for target in sorted(self.interaction_graph.out_vertices(source)):
                j = self.index[target]
                if (self.players[i] is None) or (self.players[j] is None):
                    continue
                # Don't duplicate matches
                if ((i, j) in indices) or ((j, i) in indices):
                    continue
                indices.add((i, j))
        return indices

    def score_all(self) -> List:
        """Plays the next round of the process. Every player is paired up
        against every other player and the total scores are recorded.

        Returns
        -------
        scores:
            List of scores for each player
        """
        N = len(self.players)
        scores = [0] * N
        for i, j in self._matchup_indices():
            player1 = self.players[i]
            player2 = self.players[j]
            match = self.match_class(
                (player1, player2),
                turns=self.turns,
                prob_end=self.prob_end,
                noise=self.noise,
                game=self.game,
                deterministic_cache=self.deterministic_cache,
                seed=next(self._bulk_random),
            )
            match.play()
            match_scores = match.final_score_per_turn()
            scores[i] += match_scores[0]
            scores[j] += match_scores[1]
        self.score_history.append(scores)
        return scores

    def population_distribution(self) -> Counter:
        """Returns the population distribution of the last iteration.

        Returns
        -------
        counter:
            The counts of each strategy in the population of the last iteration
        """
        player_names = [str(player) for player in self.players]
        counter = Counter(player_names)
        return counter

    def __iter__(self) -> object:
        """
        Returns
        -------
        self
        """
        return self

    def reset(self) -> None:
        """Reset the process to replay."""
        self.winning_strategy_name = None
        self.score_history = []
        # Reset all the players
        self.set_players()

    def play(self) -> List[Counter]:
        """
        Play the process out to completion. If played with mutation this will
        not terminate.

        Returns
        -------
        populations:
            Returns a list of all the populations
        """
        if not self.stop_on_fixation or self.mutation_rate != 0:
            raise ValueError(
                "MoranProcess.play() will never exit if mutation_rate is "
                "nonzero or stop_on_fixation is False. Use iteration instead."
            )
        while True:
            try:
                self.__next__()
            except StopIteration:
                break
        return self.populations

    def __len__(self) -> int:
        """
        Returns
        -------
        The length of the Moran process: the number of populations
        """
        return len(self.populations)

    def populations_plot(self, ax=None):
        """
        Create a stackplot of the population distributions at each iteration
        of the Moran process.

        Parameters
        ----------
        ax: matplotlib axis
            Allows the plot to be written to a given matplotlib axis.
            Default is None.

        Returns
        -------
        A matplotlib axis object
        """
        player_names = self.populations[0].keys()
        if ax is None:
            _, ax = plt.subplots()
        else:
            ax = ax

        plot_data = []
        labels = []
        for name in player_names:
            labels.append(name)
            values = [counter[name] for counter in self.populations]
            plot_data.append(values)
            domain = range(len(values))

        ax.stackplot(domain, plot_data, labels=labels)
        ax.set_title("Moran Process Population by Iteration")
        ax.set_xlabel("Iteration")
        ax.set_ylabel("Number of Individuals")
        ax.legend()
        return ax


class ApproximateMoranProcess(MoranProcess):
    """
    A class to approximate a Moran process based on a distribution of
    potential Match outcomes.

    Instead of playing the matches, the result is sampled from a dictionary
    of player tuples to distribution of match outcomes
    """

    def __init__(
        self,
        players: List[Player],
        cached_outcomes: dict,
        mutation_rate: float = 0,
        seed: Optional[int] = None,
    ) -> None:
        """
        Parameters
        ----------
        players:
        cached_outcomes:
            Mapping tuples of players to instances of the moran.Pdf class.
        mutation_rate:
            The rate of mutation. Replicating players are mutated with
            probability `mutation_rate`
        """
        super(ApproximateMoranProcess, self).__init__(
            players,
            turns=0,
            noise=0,
            deterministic_cache=None,
            mutation_rate=mutation_rate,
            seed=seed,
        )
        self.cached_outcomes = cached_outcomes

    def set_players(self) -> None:
        """Copy the initial players into the first population."""
        self.players = []
        for player in self.initial_players:
            player.reset()
            self.players.append(player)
        self.populations = [self.population_distribution()]

    def score_all(self) -> List:
        """Plays the next round of the process. Every player is paired up
        against every other player and the total scores are obtained from
        the cached outcomes.

        Returns
        -------
        scores:
            List of scores for each player
        """
        N = len(self.players)
        scores = [0] * N
        for i in range(N):
            for j in range(i + 1, N):
                player_names = tuple(
                    [str(self.players[i]), str(self.players[j])]
                )
                cached_score = self._get_scores_from_cache(player_names)
                scores[i] += cached_score[0]
                scores[j] += cached_score[1]
        self.score_history.append(scores)
        return scores

    def _get_scores_from_cache(self, player_names: Tuple) -> Tuple:
        """
        Retrieve the scores from the players in the cache

        Parameters
        ----------
        player_names:
            The names of the players

        Returns
        -------
        scores:
            The scores of the players in that particular match
        """
        try:
            match_scores = self.cached_outcomes[player_names].sample()
            return match_scores
        except KeyError:  # If players are stored in opposite order
            match_scores = self.cached_outcomes[player_names[::-1]].sample()
            return match_scores[::-1]
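

# A short run of the process above; assumes the standard axelrod strategy
# classes (Cooperator, Defector, TitForTat) are importable, which they are in
# this package.
if __name__ == "__main__":
    import axelrod as axl

    players = [axl.Cooperator(), axl.Defector(), axl.TitForTat()]
    mp = MoranProcess(players, turns=100, seed=1)
    populations = mp.play()           # iterates until fixation (mutation_rate == 0)
    print(mp.winning_strategy_name)   # name of the strategy the population fixated on
    print(len(mp))                    # number of recorded populations
    ax = mp.populations_plot()        # stackplot of the population over time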
PypiClean
/2Keys-0.5.1.tar.gz/2Keys-0.5.1/twokeys/add_keyboard/add_keyboard.py
import asyncio
from os import path, listdir, getcwd, system

import colorful

from ..util.constants import KEYBOARDS_PATH_BASE, KEYBOARD_EVENT_FORMAT, \
    KEYBOARD_EVENT_SIZE, SCRIPTS_ROOT, MODULE_NAME
from ..util.logger import Logger
from ..util.config import load_config
from ..watcher import AsyncKeyboard as AsyncKeyboardWatcher

logger = Logger("detect")


# Adds every keyboard (note the plural "s") listed in the config
def add_keyboards(config):
    for key, value in config["keyboards"].items():
        logger.info("Running add script for keyboard " + colorful.cyan(key) + "...")
        print("")  # Padding
        system("cd " + getcwd() + " && python3 -m " + MODULE_NAME + " add " + key)
        print("")  # Padding


def add_keyboard(name, gen_handler, inputs_path):
    # Check if a keyboard name was given
    config = load_config()
    if name == "" or name not in config["keyboards"]:
        logger.warn("No keyboard supplied.")
        logger.warn("Detection will be run on all keyboards.")
        logger.warn("To just generate daemons, use the 'daemon-gen' command")
        logger.info("Running detection on all keyboards...")
        return add_keyboards(config)

    logger.info("Mapping keyboard " + name)
    logger.info("Scanning for keyboards...")
    if not path.isdir(inputs_path):  # Make sure there's something to detect
        logger.err("Couldn't scan for keyboards")
        logger.err("Verify you have at least one keyboard plugged in")
        logger.err("and the dir " + inputs_path + " exists")
        logger.err("You can specify a custom path with the --inputs-path option")
        exit()

    # Scan
    # From https://stackoverflow.com/questions/3207219/how-do-i-list-all-files-of-a-directory
    keyboards = listdir(inputs_path)
    logger.debug("Keyboards:")
    logger.debug(keyboards)

    logger.info("Press a button on the keyboard you want to map to register it.")
    # Then watch all keyboards and ask for one to be pressed:
    # one keyboard watcher class per input device
    keyboards_events = [AsyncKeyboardWatcher(keyboard_path, inputs_path)
                        for keyboard_path in keyboards]
    # The handler needs access to keyboards_events (which it won't have when it
    # is executed inside the watcher) as well as the keyboard name, so it is
    # generated here
    handler = gen_handler(keyboards_events, name)

    # Run one watcher job per keyboard
    jobs = [keyboards_events[i].keyboard_watcher(handler)
            for i in range(0, len(keyboards))]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait(jobs))
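

# Illustrative sketch only: `gen_handler` is supplied by the caller of
# add_keyboard(), and its exact handler contract depends on
# AsyncKeyboardWatcher, which is not shown here. The `stop_watch` method name
# below is an assumption for illustration, not a documented API.
def example_gen_handler(keyboards_events, name):
    async def handler(keyboard):
        # Once any keyboard registers a key press, stop all watchers and
        # report which device should be mapped to `name`.
        for watcher in keyboards_events:
            watcher.stop_watch()  # assumed watcher method
        print("Detected keyboard %s for mapping %s" % (keyboard, name))
    return handler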
PypiClean
/KoreaTrain-0.0.1.tar.gz/KoreaTrain-0.0.1/koreatrain/koreatrain.py
from SRT import *
from korail2 import *

from .tools import get_time
from .constants import Platform
from .dataclass import Parameter, SRParameter, KorailParameter
from .errors import KoreaTrainError, KoreaTrainLoginError, KoreaTrainNotLoginError


class KoreaTrain:
    def __init__(
        self,
        platform: Platform,
        parameter: Parameter | None = None,
        username: str | None = None,
        password: str | None = None,
        auto_login: bool = True,
        feedback: bool = False
    ) -> None:
        self.platform = platform
        self.parameter = parameter
        self.username = username
        self.password = password
        self.logged_in = False
        self.service = None

        if parameter is not None and not isinstance(parameter, Parameter):
            raise TypeError('Invalid type: `parameter` must be a Parameter instance.')
        if username is None and password is None:
            auto_login = False

        match platform:
            case Platform.SR:
                self.service = SRT(username, password, False, feedback)
            case Platform.KORAIL:
                self.service = Korail(username, password, False, feedback)
            case _:
                raise ValueError(f'Invalid value: invalid platform value ({platform}) has been received.')

        if auto_login:
            self.login()

    def __repr__(self) -> str:
        return (f'[{type(self).__name__}] platform: {self.platform}, '
                f'parameter: {self.parameter}, logged_in: {self.logged_in}.')

    def login(self, username: str | None = None, password: str | None = None) -> bool:
        if username is None:
            username = self.username
        if password is None:
            password = self.password
        if username is None or password is None:
            raise TypeError('Invalid type: the username or password cannot be None.')

        self.logged_in = self.service.login(username, password)
        return self.logged_in

    def logout(self) -> bool:
        self.service.logout()
        return True

    def set_parameter(self, parameter: Parameter):
        # Check if the base class of the parameter is the Parameter.
        if isinstance(parameter, Parameter):
            self.parameter = parameter
        else:
            raise TypeError('Invalid type: `parameter` must be a Parameter instance.')

    def search_train(self, parameter: Parameter | None = None, available_only: bool = True) -> list:
        if parameter is None:
            parameter = self.parameter
        if parameter is None or not isinstance(parameter, Parameter):
            raise TypeError('Invalid type: `parameter` must be a Parameter instance.')

        match self.platform:
            case Platform.SR:
                if type(parameter) is not SRParameter:
                    raise TypeError('Invalid type: only SRParameter can be used when the platform is specified as SR.')
                return self.service.search_train(
                    dep=parameter.dep,
                    arr=parameter.arr,
                    date=parameter.date,
                    time=parameter.time,
                    time_limit=parameter.time_limit,
                    available_only=available_only
                )
            case Platform.KORAIL:
                if type(parameter) is not KorailParameter:
                    raise TypeError('Invalid type: only KorailParameter can be used when the platform is specified as KORAIL.')
                # TODO: Optimize this by calling multiple search_train instead
                # of calling the search_train_allday.
                trains = self.service.search_train_allday(
                    dep=parameter.dep,
                    arr=parameter.arr,
                    date=parameter.date,
                    time=parameter.time,
                    train_type=parameter.train_type,
                    passengers=parameter.passengers,
                    include_no_seats=(not available_only)
                )
                if parameter.time is None:
                    parameter.time = get_time()
                if parameter.time_limit is None:
                    return trains
                else:
                    return [train for train in trains
                            if int(parameter.time) <= int(train.dep_time) <= int(parameter.time_limit)]

    def reserve(self, train, parameter: Parameter | None = None):
        if not self.logged_in:
            raise KoreaTrainNotLoginError()
        if parameter is None:
            parameter = self.parameter
        if parameter is None or not isinstance(parameter, Parameter):
            raise TypeError('Invalid type: `parameter` must be a Parameter instance.')

        match self.platform:
            case Platform.SR:
                if type(parameter) is not SRParameter:
                    raise ValueError('Invalid parameter.')
                return self.service.reserve(
                    train=train,
                    passengers=parameter.passengers,
                    special_seat=parameter.reserve_option,
                    window_seat=parameter.window
                )
            case Platform.KORAIL:
                if type(parameter) is not KorailParameter:
                    raise ValueError('Invalid parameter.')
                return self.service.reserve(
                    train=train,
                    passengers=parameter.passengers,
                    option=parameter.reserve_option
                )

    def get_reservations(self, paid_only: bool = False) -> list:
        if not self.logged_in:
            raise KoreaTrainNotLoginError()

        match self.platform:
            case Platform.SR:
                return self.service.get_reservations(paid_only)
            case Platform.KORAIL:
                # TODO: paid_only option only works for SR. Implement one for Korail.
                return self.service.reservations()

    # def get_tickets(self):
    #     pass

    def cancel(self, reservation) -> bool:
        if not self.logged_in:
            raise KoreaTrainNotLoginError()
        return self.service.cancel(reservation)
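

# Usage sketch based on the class above. The SRParameter constructor arguments
# are assumed to mirror the fields search_train() reads (dep, arr, date, time);
# credentials and stations are placeholders.
if __name__ == "__main__":
    kt = KoreaTrain(Platform.SR, username='010xxxxxxxx', password='...')
    param = SRParameter(dep='수서', arr='부산', date='20240101', time='090000')  # assumed signature
    trains = kt.search_train(param)          # available trains only, by default
    if trains:
        reservation = kt.reserve(trains[0], param)
        print(kt.get_reservations())
        kt.cancel(reservation)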
PypiClean
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/node_modules/jquery/src/css/showHide.js
define( [
	"../core",
	"../data/var/dataPriv",
	"../css/var/isHiddenWithinTree"
], function( jQuery, dataPriv, isHiddenWithinTree ) {

"use strict";

var defaultDisplayMap = {};

function getDefaultDisplay( elem ) {
	var temp,
		doc = elem.ownerDocument,
		nodeName = elem.nodeName,
		display = defaultDisplayMap[ nodeName ];

	if ( display ) {
		return display;
	}

	temp = doc.body.appendChild( doc.createElement( nodeName ) );
	display = jQuery.css( temp, "display" );

	temp.parentNode.removeChild( temp );

	if ( display === "none" ) {
		display = "block";
	}
	defaultDisplayMap[ nodeName ] = display;

	return display;
}

function showHide( elements, show ) {
	var display, elem,
		values = [],
		index = 0,
		length = elements.length;

	// Determine new display value for elements that need to change
	for ( ; index < length; index++ ) {
		elem = elements[ index ];
		if ( !elem.style ) {
			continue;
		}

		display = elem.style.display;
		if ( show ) {

			// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
			// check is required in this first loop unless we have a nonempty display value (either
			// inline or about-to-be-restored)
			if ( display === "none" ) {
				values[ index ] = dataPriv.get( elem, "display" ) || null;
				if ( !values[ index ] ) {
					elem.style.display = "";
				}
			}
			if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
				values[ index ] = getDefaultDisplay( elem );
			}
		} else {
			if ( display !== "none" ) {
				values[ index ] = "none";

				// Remember what we're overwriting
				dataPriv.set( elem, "display", display );
			}
		}
	}

	// Set the display of the elements in a second loop to avoid constant reflow
	for ( index = 0; index < length; index++ ) {
		if ( values[ index ] != null ) {
			elements[ index ].style.display = values[ index ];
		}
	}

	return elements;
}

jQuery.fn.extend( {
	show: function() {
		return showHide( this, true );
	},
	hide: function() {
		return showHide( this );
	},
	toggle: function( state ) {
		if ( typeof state === "boolean" ) {
			return state ? this.show() : this.hide();
		}

		return this.each( function() {
			if ( isHiddenWithinTree( this ) ) {
				jQuery( this ).show();
			} else {
				jQuery( this ).hide();
			}
		} );
	}
} );

return showHide;
} );
PypiClean
/Eureqa-1.76.0.tar.gz/Eureqa-1.76.0/eureqa/analysis/components/modal.py
import os.path

from eureqa.analysis.analysis_file import AnalysisFile
from eureqa.analysis.components.modal_link import ModalLink
from eureqa.analysis.components.base import _Component


class Modal(_Component):
    """Represents a popup window (modal) which can contain other components.

    Example::

        # create a modal which is activated if a link is clicked
        text_block = TextBlock("This text is inside the modal")
        modal = Modal(title="This is the modal title", size='medium',
                      icon_file_path='path/to/icon', component=text_block)
        h = HtmlBlock('Click this link to open a modal: {0}'.format(
            analysis.html_ref(modal.link('link text'))))
        layout.add_component(h)

    :param str title: The title of the modal
    :param str size: (optional) modal size. Options are 'small', 'medium' or 'large'
    :param str icon_file_path: (optional) path to an icon file to display in the corner of the modal
    :param Component component: The component to display in the modal
    """

    _component_type_str = 'MODAL'

    def __init__(self, title=None, size=None, icon_file_path=None, component=None,
                 _analysis=None, _component_id=None, _component_type=None):
        self._title = title or ''
        self._size = size or 'small'

        if icon_file_path is not None:
            self._icon_file_path = icon_file_path

        if component is not None:
            self._content_component = component

        super(Modal, self).__init__(_analysis=_analysis,
                                    _component_id=_component_id,
                                    _component_type=_component_type)

        # call to self._associate may have been triggered by super.__init__
        # above, so we set up the content component after that call, to get
        # the component id if there is one
        if hasattr(self, '_content_component') and hasattr(component, '_component_id'):
            self._content_component_id = component._component_id

    @property
    def content_component_id(self):
        """The id of the component to be displayed in the modal

        :rtype: str
        """
        return getattr(self, "_content_component_id", None)

    @property
    def title(self):
        """The title of the modal

        :rtype: str
        """
        return getattr(self, "_title", None)

    @property
    def size(self):
        """The size of the modal

        :rtype: str
        """
        return getattr(self, "_size", None)

    @property
    def icon_file_url(self):
        """The url of an icon file to be displayed in the corner of the modal

        :rtype: str
        """
        return getattr(self, "_icon_file_url", None)

    @property
    def icon_file_id(self):
        """The id of an icon file to be displayed in the corner of the modal

        :rtype: str
        """
        return getattr(self, "_icon_file_id", None)

    def _walk_children(self):
        if hasattr(self, "_content_component"):
            yield self._content_component

    def _register(self, analysis):
        updated_content = False

        if hasattr(self, '_content_component'):
            self._content_component_id = self._content_component._component_id
            updated_content = True

        if hasattr(self, '_icon_file_path') and not hasattr(self, "_icon_file_url"):
            icon_file_name = os.path.basename(self._icon_file_path)
            icon_file = open(self._icon_file_path, 'rb')
            self._icon_file = AnalysisFile.create(analysis, icon_file,
                                                  filename=icon_file_name)
            self._icon_file_url = ("/api/%s/analysis/%s/files/%s" %
                                   (analysis._organization, analysis._id,
                                    self._icon_file._file_id))
            self._icon_file_id = self._icon_file._file_id
            updated_content = True

        super(Modal, self)._register(analysis)

    def _associate_with_table(self, analysis):
        content_json = []

        if hasattr(self, '_content_component'):
            content_json += self._content_component._associate_with_table(analysis)
            self._content_component_id = self._content_component._component_id

        if hasattr(self, '_icon_file_path'):
            icon_file_name = os.path.basename(self._icon_file_path)
            icon_file = open(self._icon_file_path, 'rb')
            self._icon_file = AnalysisFile.create(analysis, icon_file,
                                                  filename=icon_file_name)
            self._icon_file_url = ("/api/%s/analysis/%s/files/%s" %
                                   (analysis._organization, analysis._id,
                                    self._icon_file._file_id))
            self._icon_file_id = self._icon_file._file_id

        return super(Modal, self)._associate_with_table(analysis) + content_json

    def _fields(self):
        return super(Modal, self)._fields() + [
            'content_component_id', 'title', 'size', 'icon_file_url', 'icon_file_id'
        ]

    def link(self, link_text=None):
        """ Returns a ModalLink component which represents a link to this modal

        :param str link_text: (optional) link text to use in the ModalLink instead of default text
        :rtype: eureqa.analysis.components.modal_link.ModalLink
        """
        return ModalLink(modal=self, link_text=link_text)
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/moment/locale/de-ch.js
;(function (global, factory) {
   typeof exports === 'object' && typeof module !== 'undefined'
       && typeof require === 'function' ? factory(require('../moment')) :
   typeof define === 'function' && define.amd ? define(['../moment'], factory) :
   factory(global.moment)
}(this, (function (moment) { 'use strict';

    //! moment.js locale configuration

    function processRelativeTime(number, withoutSuffix, key, isFuture) {
        var format = {
            m: ['eine Minute', 'einer Minute'],
            h: ['eine Stunde', 'einer Stunde'],
            d: ['ein Tag', 'einem Tag'],
            dd: [number + ' Tage', number + ' Tagen'],
            w: ['eine Woche', 'einer Woche'],
            M: ['ein Monat', 'einem Monat'],
            MM: [number + ' Monate', number + ' Monaten'],
            y: ['ein Jahr', 'einem Jahr'],
            yy: [number + ' Jahre', number + ' Jahren'],
        };
        return withoutSuffix ? format[key][0] : format[key][1];
    }

    var deCh = moment.defineLocale('de-ch', {
        months: 'Januar_Februar_März_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember'.split(
            '_'
        ),
        monthsShort: 'Jan._Feb._März_Apr._Mai_Juni_Juli_Aug._Sep._Okt._Nov._Dez.'.split(
            '_'
        ),
        monthsParseExact: true,
        weekdays: 'Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag'.split(
            '_'
        ),
        weekdaysShort: 'So_Mo_Di_Mi_Do_Fr_Sa'.split('_'),
        weekdaysMin: 'So_Mo_Di_Mi_Do_Fr_Sa'.split('_'),
        weekdaysParseExact: true,
        longDateFormat: {
            LT: 'HH:mm',
            LTS: 'HH:mm:ss',
            L: 'DD.MM.YYYY',
            LL: 'D. MMMM YYYY',
            LLL: 'D. MMMM YYYY HH:mm',
            LLLL: 'dddd, D. MMMM YYYY HH:mm',
        },
        calendar: {
            sameDay: '[heute um] LT [Uhr]',
            sameElse: 'L',
            nextDay: '[morgen um] LT [Uhr]',
            nextWeek: 'dddd [um] LT [Uhr]',
            lastDay: '[gestern um] LT [Uhr]',
            lastWeek: '[letzten] dddd [um] LT [Uhr]',
        },
        relativeTime: {
            future: 'in %s',
            past: 'vor %s',
            s: 'ein paar Sekunden',
            ss: '%d Sekunden',
            m: processRelativeTime,
            mm: '%d Minuten',
            h: processRelativeTime,
            hh: '%d Stunden',
            d: processRelativeTime,
            dd: processRelativeTime,
            w: processRelativeTime,
            ww: '%d Wochen',
            M: processRelativeTime,
            MM: processRelativeTime,
            y: processRelativeTime,
            yy: processRelativeTime,
        },
        dayOfMonthOrdinalParse: /\d{1,2}\./,
        ordinal: '%d.',
        week: {
            dow: 1, // Monday is the first day of the week.
            doy: 4, // The week that contains Jan 4th is the first week of the year.
        },
    });

    return deCh;

})));
PypiClean
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/_source/plugins/smiley/plugin.js
/*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/

CKEDITOR.plugins.add( 'smiley',
{
	requires : [ 'dialog' ],

	init : function( editor )
	{
		editor.config.smiley_path = editor.config.smiley_path || ( this.path + 'images/' );
		editor.addCommand( 'smiley', new CKEDITOR.dialogCommand( 'smiley' ) );
		editor.ui.addButton( 'Smiley',
			{
				label : editor.lang.smiley.toolbar,
				command : 'smiley'
			});
		CKEDITOR.dialog.add( 'smiley', this.path + 'dialogs/smiley.js' );
	}
} );

/**
 * The base path used to build the URL for the smiley images. It must end with
 * a slash.
 * @name CKEDITOR.config.smiley_path
 * @type String
 * @default {@link CKEDITOR.basePath} + 'plugins/smiley/images/'
 * @example
 * config.smiley_path = 'http://www.example.com/images/smileys/';
 * @example
 * config.smiley_path = '/images/smileys/';
 */

/**
 * The file names for the smileys to be displayed. These files must be
 * contained inside the URL path defined with the
 * {@link CKEDITOR.config.smiley_path} setting.
 * @type Array
 * @default (see example)
 * @example
 * // This is actually the default value.
 * config.smiley_images = [
 *     'regular_smile.gif','sad_smile.gif','wink_smile.gif','teeth_smile.gif','confused_smile.gif','tounge_smile.gif',
 *     'embaressed_smile.gif','omg_smile.gif','whatchutalkingabout_smile.gif','angry_smile.gif','angel_smile.gif','shades_smile.gif',
 *     'devil_smile.gif','cry_smile.gif','lightbulb.gif','thumbs_down.gif','thumbs_up.gif','heart.gif',
 *     'broken_heart.gif','kiss.gif','envelope.gif'];
 */
CKEDITOR.config.smiley_images = [
	'regular_smile.gif','sad_smile.gif','wink_smile.gif','teeth_smile.gif','confused_smile.gif','tounge_smile.gif',
	'embaressed_smile.gif','omg_smile.gif','whatchutalkingabout_smile.gif','angry_smile.gif','angel_smile.gif','shades_smile.gif',
	'devil_smile.gif','cry_smile.gif','lightbulb.gif','thumbs_down.gif','thumbs_up.gif','heart.gif',
	'broken_heart.gif','kiss.gif','envelope.gif'];

/**
 * The description to be used for each of the smileys defined in the
 * {@link CKEDITOR.config.smiley_images} setting. Each entry in this array list
 * must match its relative pair in the {@link CKEDITOR.config.smiley_images}
 * setting.
 * @type Array
 * @default The textual descriptions of smiley.
 * @example
 * // Default settings.
 * config.smiley_descriptions =
 *     [
 *         'smiley', 'sad', 'wink', 'laugh', 'frown', 'cheeky', 'blush', 'surprise',
 *         'indecision', 'angry', 'angel', 'cool', 'devil', 'crying', 'enlightened', 'no',
 *         'yes', 'heart', 'broken heart', 'kiss', 'mail'
 *     ];
 * @example
 * // Use textual emoticons as description.
 * config.smiley_descriptions =
 *     [
 *         ':)', ':(', ';)', ':D', ':/', ':P', ':*)', ':-o',
 *         ':|', '>:(', 'o:)', '8-)', '>:-)', ';(', '', '', '',
 *         '', '', ':-*', ''
 *     ];
 */
CKEDITOR.config.smiley_descriptions =
	[
		'smiley', 'sad', 'wink', 'laugh', 'frown', 'cheeky', 'blush', 'surprise',
		'indecision', 'angry', 'angel', 'cool', 'devil', 'crying', 'enlightened', 'no',
		'yes', 'heart', 'broken heart', 'kiss', 'mail'
	];
PypiClean
/Flask-Mix-0.0.2.tar.gz/Flask-Mix-0.0.2/flask_mix.py
import json
import os

from flask import current_app


class Mix:
    """
    Mix extends a Flask app by adding a `mix` template helper for including
    static files managed by Laravel Mix.

    See https://laravel-mix.com for more information on Laravel Mix.
    """

    def __init__(self, app=None):
        self.app = app
        self.assets = ''
        self.assets_base_url = ''

        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """
        Initializes the extension. The assets and optional asset URL are
        loaded from disk when the app starts.

        In debug mode, the assets and asset url are reloaded from disk
        before each request.
        """
        app.config.setdefault('MIX_ASSETS_BASE_URL', '')
        app.config.setdefault('MIX_MANIFEST_PATH',
                              os.path.join(app.static_folder, 'mix-manifest.json'))
        app.config.setdefault('MIX_HOT_PATH',
                              os.path.join(app.static_folder, 'hot'))

        self.get_assets(app)
        self.get_assets_base_url(app)

        if app.config.get('DEBUG'):
            app.before_request(self.reload)

        app.add_template_global(self.mix)

    def reload(self):
        """
        Reloads the assets and assets URL from disk. Should only be used in
        DEBUG mode.
        """
        self.get_assets(current_app)
        self.get_assets_base_url(current_app)

    def get_assets(self, app):
        """
        Loads the asset map from the mix manifest file on disk.
        """
        manifest_path = app.config.get('MIX_MANIFEST_PATH')

        try:
            with app.open_resource(manifest_path, 'r') as manifest:
                self.assets = json.load(manifest)
        except IOError:
            raise RuntimeError(
                "MIX_MANIFEST_PATH requires a valid mix-manifest.json file"
            )

    def get_assets_base_url(self, app):
        """
        Loads the assets url from the mix hot file on disk or from the
        MIX_ASSETS_BASE_URL environment variable.
        """
        hot_path = app.config.get('MIX_HOT_PATH')

        try:
            with app.open_resource(hot_path, 'r') as hot_file:
                base_url = hot_file.read().strip()
                if base_url.endswith('/'):
                    base_url = base_url[:-1]
                self.assets_base_url = base_url
        except IOError:
            # No hot file on disk: fall back to the configured base URL
            self.assets_base_url = app.config.get('MIX_ASSETS_BASE_URL')

    def mix(self, file):
        """
        Method exposed to templates that can be given the path to a source
        asset and will return the path to the built static asset.
        """
        if not self.assets:
            return ''

        return self.assets_base_url + self.assets.get(file)
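

# Minimal wiring sketch for the extension above. Template usage is assumed to
# be Jinja's {{ mix('js/app.js') }} since `mix` is registered via
# app.add_template_global; the manifest entry and app setup are placeholders.
if __name__ == "__main__":
    from flask import Flask, render_template_string

    app = Flask(__name__)
    mix = Mix(app)

    @app.route('/')
    def index():
        # Resolves 'js/app.js' through mix-manifest.json,
        # e.g. to '/js/app.abc123.js' when versioning is enabled.
        return render_template_string(
            "<script src=\"{{ mix('js/app.js') }}\"></script>")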
PypiClean
/ClueMapperSecure-0.6.3.tar.gz/ClueMapperSecure-0.6.3/src/clue/secure/htpasswd.py
# Adapted from:
#   http://svn.edgewall.org/repos/trac/trunk/contrib/htpasswd.py

import crypt
import os
import random


class HtpasswdFile:
    """A class for manipulating htpasswd files.

    >>> import tempfile
    >>> h, t = tempfile.mkstemp()
    >>> passwd = HtpasswdFile(t, True)
    >>> passwd.update('abc', 'def')
    >>> passwd.update('foo', 'bar')
    >>> passwd.update('abc', 'xyz')
    >>> passwd.delete('abc')
    >>> passwd.save()
    >>> passwd = HtpasswdFile(t, False)
    >>> os.remove(t)
    >>> passwd = HtpasswdFile(t, False)
    Traceback (most recent call last):
    Exception: ... does not exist
    """

    SALT_CHARS = 'abcdefghijklmnopqrstuvwxyz' \
                 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' \
                 '0123456789/.'

    def __init__(self, filename, create=False):
        self.entries = []
        self.filename = filename
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def gen_salt(self):
        return random.choice(self.SALT_CHARS) + random.choice(self.SALT_CHARS)

    def load(self):
        """Read the htpasswd file into memory."""
        lines = open(self.filename, 'r').readlines()
        self.entries = []
        for line in lines:
            username, pwhash = line.split(':')
            entry = [username, pwhash.rstrip()]
            self.entries.append(entry)

    def save(self):
        """Write the htpasswd file to disk"""
        open(self.filename, 'w').writelines(
            ["%s:%s\n" % (entry[0], entry[1]) for entry in self.entries])

    def update(self, username, password):
        """Replace the entry for the given user, or add it if new."""
        pwhash = crypt.crypt(password, self.gen_salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = pwhash
        else:
            self.entries.append([username, pwhash])

    def delete(self, username):
        """Remove the entry for the given user."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]
PypiClean
/GloboNetworkAPI-0.9.6.tar.gz/GloboNetworkAPI-0.9.6/networkapiclient/ApiVipRequest.py
import urllib

from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids


class ApiVipRequest(ApiGenericClient):

    def __init__(self, networkapi_url, user, password, user_ldap=None, log_level='INFO'):
        """Class constructor receives parameters to connect to the networkAPI.

        :param networkapi_url: URL to access the network API.
        :param user: User for authentication.
        :param password: Password for authentication.
        """
        super(ApiVipRequest, self).__init__(
            networkapi_url,
            user,
            password,
            user_ldap,
            log_level
        )

    def list_environment_by_environmet_vip(self, environment_vip_id):
        """
        """
        uri = 'api/vip/list/environment/by/environment/vip/%s/' % (
            environment_vip_id)
        return super(ApiVipRequest, self).get(uri)

    #######################
    # API V3
    #######################
    def option_vip_by_environmentvip(self, environment_vip_id):
        """
        List Option Vip by Environment Vip

        param environment_vip_id: Id of Environment Vip
        """
        uri = 'api/v3/option-vip/environment-vip/%s/' % environment_vip_id
        return super(ApiVipRequest, self).get(uri)

    def get_vip_request_details(self, vip_request_id):
        """
        Method to get details of vip request

        param vip_request_id: vip_request id
        """
        uri = 'api/v3/vip-request/details/%s/' % vip_request_id
        return super(ApiVipRequest, self).get(uri)

    def search_vip_request_details(self, search):
        """
        Method to list vip request

        param search: search
        """
        uri = 'api/v3/vip-request/details/?%s' % urllib.urlencode(
            {'search': search})
        return super(ApiVipRequest, self).get(uri)

    def get_vip_request(self, vip_request_id):
        """
        Method to get vip request

        param vip_request_id: vip_request id
        """
        uri = 'api/v3/vip-request/%s/' % vip_request_id
        return super(ApiVipRequest, self).get(uri)

    def search_vip_request(self, search):
        """
        Method to list vip request

        param search: search
        """
        uri = 'api/v3/vip-request/?%s' % urllib.urlencode({'search': search})
        return super(ApiVipRequest, self).get(uri)

    def save_vip_request(self, vip_request):
        """
        Method to save vip request

        param vip_request: vip_request object
        """
        uri = 'api/v3/vip-request/'

        data = dict()
        data['vips'] = list()
        data['vips'].append(vip_request)

        return super(ApiVipRequest, self).post(uri, data)

    def update_vip_request(self, vip_request, vip_request_id):
        """
        Method to update vip request

        param vip_request: vip_request object
        param vip_request_id: vip_request id
        """
        uri = 'api/v3/vip-request/%s/' % vip_request_id

        data = dict()
        data['vips'] = list()
        data['vips'].append(vip_request)

        return super(ApiVipRequest, self).put(uri, data)

    def delete_vip_request(self, vip_request_ids):
        """
        Method to delete vip request

        param vip_request_ids: vip_request ids
        """
        uri = 'api/v3/vip-request/%s/' % vip_request_ids
        return super(ApiVipRequest, self).delete(uri)

    def create_vip(self, vip_request_ids):
        """
        Method to create vip request

        param vip_request_ids: vip_request ids
        """
        uri = 'api/v3/vip-request/deploy/%s/' % vip_request_ids
        return super(ApiVipRequest, self).post(uri)

    def update_vip(self, vip_request, vip_request_id):
        """
        Method to update vip request

        param vip_request: vip_request object
        param vip_request_id: vip_request id
        """
        uri = 'api/v3/vip-request/deploy/%s/' % vip_request_id

        data = dict()
        data['vips'] = list()
        data['vips'].append(vip_request)

        return super(ApiVipRequest, self).put(uri, data)

    def remove_vip(self, vip_request_ids):
        """
        Method to delete vip request

        param vip_request_ids: vip_request ids
        """
        uri = 'api/v3/vip-request/deploy/%s/' % vip_request_ids
        return super(ApiVipRequest, self).delete(uri)

    def search(self, **kwargs):
        """
        Method to search vip's based on extends search.

        :param search: Dict containing QuerySets to find vip's.
        :param include: Array containing fields to include on response.
        :param exclude: Array containing fields to exclude on response.
        :param fields: Array containing fields to override default fields.
        :param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing vip's
        """
        return super(ApiVipRequest, self).get(
            self.prepare_url('api/v3/vip-request/', kwargs))

    def get(self, ids, **kwargs):
        """
        Method to get vips by their id's

        :param ids: List containing identifiers of vip's
        :param include: Array containing fields to include on response.
        :param exclude: Array containing fields to exclude on response.
        :param fields: Array containing fields to override default fields.
        :param kind: Determine if result will be detailed ('detail') or basic ('basic').
        :return: Dict containing vip's
        """
        url = build_uri_with_ids('api/v3/vip-request/%s/', ids)
        return super(ApiVipRequest, self).get(self.prepare_url(url, kwargs))

    def delete(self, ids):
        """
        Method to delete vip's by their id's

        :param ids: Identifiers of vip's
        :return: None
        """
        url = build_uri_with_ids('api/v3/vip-request/%s/', ids)
        return super(ApiVipRequest, self).delete(url)

    def update(self, vips):
        """
        Method to update vip's

        :param vips: List containing vip's desired to updated
        :return: None
        """
        data = {'vips': vips}
        vips_ids = [str(vip.get('id')) for vip in vips]

        return super(ApiVipRequest, self).put(
            'api/v3/vip-request/%s/' % ';'.join(vips_ids), data)

    def create(self, vips):
        """
        Method to create vip's

        :param vips: List containing vip's desired to be created on database
        :return: None
        """
        data = {'vips': vips}
        return super(ApiVipRequest, self).post('api/v3/vip-request/', data)

    def deploy(self, ids):
        """
        Method to deploy vip's

        :param vips: List containing vip's desired to be deployed on equipment
        :return: None
        """
        url = build_uri_with_ids('api/v3/vip-request/deploy/%s/', ids)
        return super(ApiVipRequest, self).post(url)

    def undeploy(self, ids, clean_up=0):
        """
        Method to undeploy vip's

        :param vips: List containing vip's desired to be undeployed on equipment
        :return: None
        """
        url = build_uri_with_ids('api/v3/vip-request/deploy/%s/?cleanup=%s',
                                 ids, clean_up)
        return super(ApiVipRequest, self).delete(url)

    def redeploy(self, vips):
        """
        Method to redeploy vip's

        :param vips: List containing vip's desired to updated on equipment
        :return: None
        """
        data = {'vips': vips}
        vips_ids = [str(vip.get('id')) for vip in vips]

        return super(ApiVipRequest, self).put(
            'api/v3/vip-request/deploy/%s/' % ';'.join(vips_ids), data)
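

# Usage sketch for the client above. The URL and credentials are placeholders,
# and the structure of the `search` dict is an assumption for illustration;
# the kwargs are forwarded to prepare_url (search/include/exclude/fields/kind)
# as documented in the docstrings above.
if __name__ == "__main__":
    api = ApiVipRequest('https://networkapi.example.com/', 'user', 'password')

    # Extended search for VIP requests (search dict shape assumed)
    vips = api.search(search={'extends_search': [{'created': False}]},
                      kind='basic')

    # Fetch specific VIPs by id, asking only for selected fields
    details = api.get([1, 2, 3], include=['ipv4', 'ipv6'])

    # Deploy VIP request 1 on the equipment
    api.deploy([1])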
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/prism/components/prism-rest.js
Prism.languages.rest = {
	'table': [
		{
			pattern: /(\s*)(?:\+[=-]+)+\+(?:\r?\n|\r)(?:\1(?:[+|].+)+[+|](?:\r?\n|\r))+\1(?:\+[=-]+)+\+/,
			lookbehind: true,
			inside: {
				'punctuation': /\||(?:\+[=-]+)+\+/
			}
		},
		{
			pattern: /(\s*)(?:=+ +)+=+(?:(?:\r?\n|\r)\1.+)+(?:\r?\n|\r)\1(?:=+ +)+=+(?=(?:\r?\n|\r){2}|\s*$)/,
			lookbehind: true,
			inside: {
				'punctuation': /[=-]+/
			}
		}
	],

	// Directive-like patterns

	'substitution-def': {
		pattern: /(^\s*\.\. )\|(?:[^|\s](?:[^|]*[^|\s])?)\| [^:]+::/m,
		lookbehind: true,
		inside: {
			'substitution': {
				pattern: /^\|(?:[^|\s]|[^|\s][^|]*[^|\s])\|/,
				alias: 'attr-value',
				inside: {
					'punctuation': /^\||\|$/
				}
			},
			'directive': {
				pattern: /( +)[^:]+::/,
				lookbehind: true,
				alias: 'function',
				inside: {
					'punctuation': /::$/
				}
			}
		}
	},
	'link-target': [
		{
			pattern: /(^\s*\.\. )\[[^\]]+\]/m,
			lookbehind: true,
			alias: 'string',
			inside: {
				'punctuation': /^\[|\]$/
			}
		},
		{
			pattern: /(^\s*\.\. )_(?:`[^`]+`|(?:[^:\\]|\\.)+):/m,
			lookbehind: true,
			alias: 'string',
			inside: {
				'punctuation': /^_|:$/
			}
		}
	],
	'directive': {
		pattern: /(^\s*\.\. )[^:]+::/m,
		lookbehind: true,
		alias: 'function',
		inside: {
			'punctuation': /::$/
		}
	},
	'comment': {
		// The two alternatives try to prevent highlighting of blank comments
		pattern: /(^\s*\.\.)(?:(?: .+)?(?:(?:\r?\n|\r).+)+| .+)(?=(?:\r?\n|\r){2}|$)/m,
		lookbehind: true
	},
	'title': [
		// Overlined and underlined
		{
			pattern: /^(([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\2+)(?:\r?\n|\r).+(?:\r?\n|\r)\1$/m,
			inside: {
				'punctuation': /^[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]+|[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]+$/,
				'important': /.+/
			}
		},

		// Underlined only
		{
			pattern: /(^|(?:\r?\n|\r){2}).+(?:\r?\n|\r)([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\2+(?=\r?\n|\r|$)/,
			lookbehind: true,
			inside: {
				'punctuation': /[!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]+$/,
				'important': /.+/
			}
		}
	],
	'hr': {
		pattern: /((?:\r?\n|\r){2})([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\2{3,}(?=(?:\r?\n|\r){2})/,
		lookbehind: true,
		alias: 'punctuation'
	},
	'field': {
		pattern: /(^\s*):[^:\r\n]+:(?= )/m,
		lookbehind: true,
		alias: 'attr-name'
	},
	'command-line-option': {
		pattern: /(^\s*)(?:[+-][a-z\d]|(?:--|\/)[a-z\d-]+)(?:[ =](?:[a-z][\w-]*|<[^<>]+>))?(?:, (?:[+-][a-z\d]|(?:--|\/)[a-z\d-]+)(?:[ =](?:[a-z][\w-]*|<[^<>]+>))?)*(?=(?:\r?\n|\r)? {2,}\S)/im,
		lookbehind: true,
		alias: 'symbol'
	},
	'literal-block': {
		pattern: /::(?:\r?\n|\r){2}([ \t]+).+(?:(?:\r?\n|\r)\1.+)*/,
		inside: {
			'literal-block-punctuation': {
				pattern: /^::/,
				alias: 'punctuation'
			}
		}
	},
	'quoted-literal-block': {
		pattern: /::(?:\r?\n|\r){2}([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~]).*(?:(?:\r?\n|\r)\1.*)*/,
		inside: {
			'literal-block-punctuation': {
				pattern: /^(?:::|([!"#$%&'()*+,\-.\/:;<=>?@\[\\\]^_`{|}~])\1*)/m,
				alias: 'punctuation'
			}
		}
	},
	'list-bullet': {
		pattern: /(^\s*)(?:[*+\-•‣⁃]|\(?(?:\d+|[a-z]|[ivxdclm]+)\)|(?:\d+|[a-z]|[ivxdclm]+)\.)(?= )/im,
		lookbehind: true,
		alias: 'punctuation'
	},
	'doctest-block': {
		pattern: /(^\s*)>>> .+(?:(?:\r?\n|\r).+)*/m,
		lookbehind: true,
		inside: {
			'punctuation': /^>>>/
		}
	},
	'inline': [
		{
			pattern: /(^|[\s\-:\/'"<(\[{])(?::[^:]+:`.*?`|`.*?`:[^:]+:|(\*\*?|``?|\|)(?!\s).*?[^\s]\2(?=[\s\-.,:;!?\\\/'")\]}]|$))/m,
			lookbehind: true,
			inside: {
				'bold': {
					pattern: /(^\*\*).+(?=\*\*$)/,
					lookbehind: true
				},
				'italic': {
					pattern: /(^\*).+(?=\*$)/,
					lookbehind: true
				},
				'inline-literal': {
					pattern: /(^``).+(?=``$)/,
					lookbehind: true,
					alias: 'symbol'
				},
				'role': {
					pattern: /^:[^:]+:|:[^:]+:$/,
					alias: 'function',
					inside: {
						'punctuation': /^:|:$/
					}
				},
				'interpreted-text': {
					pattern: /(^`).+(?=`$)/,
					lookbehind: true,
					alias: 'attr-value'
				},
				'substitution': {
					pattern: /(^\|).+(?=\|$)/,
					lookbehind: true,
					alias: 'attr-value'
				},
				'punctuation': /\*\*?|``?|\|/
			}
		}
	],
	'link': [
		{
			pattern: /\[[^\]]+\]_(?=[\s\-.,:;!?\\\/'")\]}]|$)/,
			alias: 'string',
			inside: {
				'punctuation': /^\[|\]_$/
			}
		},
		{
			pattern: /(?:\b[a-z\d](?:[_.:+]?[a-z\d]+)*_?_|`[^`]+`_?_|_`[^`]+`)(?=[\s\-.,:;!?\\\/'")\]}]|$)/i,
			alias: 'string',
			inside: {
				'punctuation': /^_?`|`$|`?_?_$/
			}
		}
	],

	// Line block start,
	// quote attribution,
	// explicit markup start,
	// and anonymous hyperlink target shortcut (__)
	'punctuation': {
		pattern: /(^\s*)(?:\|(?= |$)|(?:---?|—|\.\.|__)(?= )|\.\.$)/m,
		lookbehind: true
	}
};
/Flask-Migrate-4.0.4.tar.gz/Flask-Migrate-4.0.4/src/flask_migrate/templates/aioflask-multidb/env.py
import asyncio
import logging
from logging.config import fileConfig

from sqlalchemy import MetaData
from flask import current_app

from alembic import context

USE_TWOPHASE = False

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')


def get_engine(bind_key=None):
    try:
        # this works with Flask-SQLAlchemy<3 and Alchemical
        return current_app.extensions['migrate'].db.get_engine(bind=bind_key)
    except TypeError:
        # this works with Flask-SQLAlchemy>=3
        return current_app.extensions['migrate'].db.engines.get(bind_key)


def get_engine_url(bind_key=None):
    try:
        return get_engine(bind_key).url.render_as_string(
            hide_password=False).replace('%', '%%')
    except AttributeError:
        return str(get_engine(bind_key).url).replace('%', '%%')


# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
config.set_main_option('sqlalchemy.url', get_engine_url())
bind_names = []
if current_app.config.get('SQLALCHEMY_BINDS') is not None:
    bind_names = list(current_app.config['SQLALCHEMY_BINDS'].keys())
else:
    get_bind_names = getattr(current_app.extensions['migrate'].db,
                             'bind_names', None)
    if get_bind_names:
        bind_names = get_bind_names()
for bind in bind_names:
    context.config.set_section_option(
        bind, "sqlalchemy.url", get_engine_url(bind_key=bind))
target_db = current_app.extensions['migrate'].db

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.


def get_metadata(bind):
    """Return the metadata for a bind."""
    if bind == '':
        bind = None
    if hasattr(target_db, 'metadatas'):
        return target_db.metadatas[bind]

    # legacy, less flexible implementation
    m = MetaData()
    for t in target_db.metadata.tables.values():
        if t.info.get('bind_key') == bind:
            t.tometadata(m)
    return m


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    # for the --sql use case, run migrations for each URL into
    # individual files.
    engines = {
        '': {
            'url': context.config.get_main_option('sqlalchemy.url')
        }
    }
    for name in bind_names:
        engines[name] = rec = {}
        rec['url'] = context.config.get_section_option(name, "sqlalchemy.url")

    for name, rec in engines.items():
        logger.info("Migrating database %s" % (name or '<default>'))
        file_ = "%s.sql" % name
        logger.info("Writing output to %s" % file_)
        with open(file_, 'w') as buffer:
            context.configure(
                url=rec['url'],
                output_buffer=buffer,
                target_metadata=get_metadata(name),
                literal_binds=True,
            )
            with context.begin_transaction():
                context.run_migrations(engine_name=name)


def do_run_migrations(_, engines):
    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if len(script.upgrade_ops_list) >= len(bind_names) + 1:
                empty = True
                for upgrade_ops in script.upgrade_ops_list:
                    if not upgrade_ops.is_empty():
                        empty = False
                if empty:
                    directives[:] = []
                    logger.info('No changes in schema detected.')

    for name, rec in engines.items():
        rec['sync_connection'] = conn = rec['connection']._sync_connection()
        if USE_TWOPHASE:
            rec['transaction'] = conn.begin_twophase()
        else:
            rec['transaction'] = conn.begin()

    try:
        for name, rec in engines.items():
            logger.info("Migrating database %s" % (name or '<default>'))
            context.configure(
                connection=rec['sync_connection'],
                upgrade_token="%s_upgrades" % name,
                downgrade_token="%s_downgrades" % name,
                target_metadata=get_metadata(name),
                process_revision_directives=process_revision_directives,
                **current_app.extensions['migrate'].configure_args
            )
            context.run_migrations(engine_name=name)

        if USE_TWOPHASE:
            for rec in engines.values():
                rec['transaction'].prepare()

        for rec in engines.values():
            rec['transaction'].commit()
    except:  # noqa: E722
        for rec in engines.values():
            rec['transaction'].rollback()
        raise
    finally:
        for rec in engines.values():
            rec['sync_connection'].close()


async def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """

    # for the direct-to-DB use case, start a transaction on all
    # engines, then run all migrations, then commit all transactions.
    engines = {
        '': {'engine': get_engine()}
    }
    for name in bind_names:
        engines[name] = rec = {}
        rec['engine'] = get_engine(bind_key=name)

    for name, rec in engines.items():
        engine = rec['engine']
        rec['connection'] = await engine.connect().start()

    await engines['']['connection'].run_sync(do_run_migrations, engines)

    for rec in engines.values():
        await rec['connection'].close()


if context.is_offline_mode():
    run_migrations_offline()
else:
    asyncio.get_event_loop().run_until_complete(run_migrations_online())
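

# A minimal sketch (not part of the template) of the application setup this
# multi-database env.py expects: a default database plus named binds, whose
# keys become the per-bind Alembic config sections above. The async driver
# URIs and the bind name are illustrative assumptions only.
#
#     app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite+aiosqlite:///main.db'
#     app.config['SQLALCHEMY_BINDS'] = {
#         'users': 'sqlite+aiosqlite:///users.db',
#     }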
/FreeGS-0.8.0-py3-none-any.whl/freegs/_divgeo.py
import numpy as np

from freeqdsk._fileutils import ChunkOutput, write_1d, write_2d


def write(data, fh, label=None):
    """
    Write a DivGeo file, given a dictionary of data

    data - dictionary
        nx, ny      Number of points in R (x), Z (y)
        rcentr      Reference value of R
        bcentr      Vacuum toroidal magnetic field at rcentr
        sibdry      Poloidal flux psi at plasma boundary

        either:
            r[nx]   1D array of R values
        or:
            rdim    Size of the R dimension
            rleft   Innermost R point

        either:
            z[ny]   1D array of Z values
        or:
            zdim    Size of the Z dimension
            zmid    Middle of the Z dimension
    """
    # Write header
    fh.write(
        """
 jm   := no. of grid points in radial direction;
 km   := no. of grid points in vertical direction;
 r    := radial coordinates of grid points [m];
 z    := vertical coordinates of grid points [m];
 psi  := flux per radiant at grid points [Wb/rad];
 psib := psi at plasma boundary [Wb/rad];
 btf  := toroidal magnetic field [t];
 rtf  := major radius at which btf is specified [m];

 jm   = {nx:d} ;
 km   = {ny:d} ;
 psib = {sibdry:1.15E} Wb/rad;
 btf  = {bcentr:1.14f} t;
 rtf  = {rcentr:1.14f} m;

""".format(**data)
    )

    try:
        r = data["r"]
    except KeyError:
        # No "r" in the dictionary
        # use rdim, rleft and nx (from eqdsk)
        Rmin = data["rleft"]
        Rmax = data["rleft"] + data["rdim"]
        nx = data["nx"]
        dR = (Rmax - Rmin) / (nx - 1)
        r = np.arange(nx) * dR + Rmin

    try:
        z = data["z"]
    except KeyError:
        # No "z" in the dictionary
        # use zdim, zmid and ny (from eqdsk)
        Zmin = data["zmid"] - 0.5 * data["zdim"]
        Zmax = data["zmid"] + 0.5 * data["zdim"]
        ny = data["ny"]
        dZ = (Zmax - Zmin) / (ny - 1)
        z = np.arange(ny) * dZ + Zmin

    # Now write r and z
    fh.write(" r(1:jm);\n")
    co = ChunkOutput(fh, chunksize=5, extraspaces=3)
    write_1d(r, co)
    co.newline()

    fh.write(" \n z(1:km);\n")
    write_1d(z, co)
    co.newline()

    fh.write(" \n ((psi(j,k)-psib,j=1,jm),k=1,km)\n")
    write_2d(data["psi"] - data["sibdry"], co)
    co.newline()
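

# Minimal usage sketch (not part of the original module): build the `data`
# dictionary described in the docstring and write it out. The grid sizes and
# field values are arbitrary placeholders; freeqdsk must be installed.
if __name__ == "__main__":
    nx, ny = 65, 65
    example = {
        "nx": nx,
        "ny": ny,
        "rcentr": 1.0,      # reference major radius [m]
        "bcentr": 2.0,      # vacuum toroidal field at rcentr [T]
        "sibdry": 0.0,      # psi at the plasma boundary
        "r": np.linspace(0.1, 2.0, nx),
        "z": np.linspace(-1.0, 1.0, ny),
        "psi": np.zeros((nx, ny)),
    }
    with open("example.divgeo", "w") as fh:
        write(example, fh)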
/LARRY_dataset-0.0.2rc0-py3-none-any.whl/larry/_datasets/klein_lab_pp_recipe/_remove_cell_cycle_correlated_genes.py
from tqdm.notebook import tqdm
import numpy as np
import scipy.stats  # imported explicitly: `import scipy` alone does not expose scipy.stats

# -- import local dependencies: --------------------------------------------------
from ._cell_cycle_genes import cell_cycle_genes
from ..._utils import sparse_zscore


def remove_cell_cycle_correlated_genes(adata, min_corr=0.1, key_added="use_genes"):
    """
    Remove signature-correlated genes from a list of test genes

    Arguments:
    ----------
    E: scipy.sparse.csc_matrix, shape (n_cells, n_genes)
        - full counts matrix
    gene_list: numpy array, shape (n_genes,)
        - full gene list
    exclude_corr_genes_list: list of list(s)
        - Each sublist is used to build a signature. Test genes correlated
          with this signature will be removed
    test_gene_idx: 1-D numpy array
        - indices of genes to test for correlation with the
          gene signatures from exclude_corr_genes_list
    min_corr: float (default=0.1)
        - Test genes with a Pearson correlation of min_corr or higher with
          any of the gene sets from exclude_corr_genes_list will be excluded

    Returns:
    --------
    numpy array of gene indices (subset of test_gene_idx) that are not
    correlated with any of the gene signatures

    Source:
    https://github.com/AllonKleinLab/SPRING_dev/blob/aa52c405b6f15efd53c66f6856799dfe46e72d01/data_prep/spring_helper.py#L307-L328
    """
    E = adata.X.tocsc()
    gene_list = adata.var["gene_ids"].tolist()
    test_gene_idx = adata.var.loc[adata.var["hv_genes"]].index.astype(int).tolist()
    exclude_corr_genes_list = cell_cycle_genes()

    seed_ix_list = []
    for l in exclude_corr_genes_list:
        seed_ix_list.append(
            np.array([i for i in range(len(gene_list)) if gene_list[i] in l], dtype=int)
        )

    exclude_ix = []
    for iSet in tqdm(range(len(seed_ix_list))):
        seed_ix = seed_ix_list[iSet][
            E[:, seed_ix_list[iSet]].sum(axis=0).A.squeeze() > 0
        ]
        tmp = sparse_zscore(E[:, seed_ix.flatten()])
        tmp = tmp.sum(1).A.squeeze()

        c = np.zeros(len(test_gene_idx))
        for iG in range(len(c)):
            c[iG], _ = scipy.stats.pearsonr(tmp, E[:, test_gene_idx[iG]].A.squeeze())

        exclude_ix.extend(
            [test_gene_idx[i] for i in range(len(test_gene_idx)) if (c[i]) >= min_corr]
        )
    exclude_ix = np.array(exclude_ix)
    filtered_idx = np.array(
        [g for g in test_gene_idx if g not in exclude_ix], dtype=int
    )
    adata.var[key_added] = adata.var.index.astype(int).isin(filtered_idx)
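

# Minimal usage sketch (not part of the original module), assuming an AnnData
# object with a sparse counts matrix in `.X`, a `gene_ids` column and a
# boolean `hv_genes` mask in `.var`, which is what the function reads.
# The input file name is a hypothetical placeholder:
#
#     import anndata
#     adata = anndata.read_h5ad("larry.h5ad")
#     remove_cell_cycle_correlated_genes(adata, min_corr=0.1)
#     print(int(adata.var["use_genes"].sum()), "genes kept")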
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/methods/mongo/manage/warn.py
# ========================×========================
#           Do not remove this credit
# ========================×========================

from typing import Optional, Tuple

import fipper


class Warn:
    async def get_warn_action(self: "fipper.Client", chat_id: int) -> Tuple[Optional[str], bool]:
        warndb = self.mongo_async.warn
        res = await warndb.find_one({"chat_id": chat_id})
        if res:
            try:
                aksi = res['action']
                return aksi
            except KeyError:
                return "mute"
        else:
            return "mute"

    async def set_warn_action(self: "fipper.Client", chat_id: int, action: Optional[str]):
        warndb = self.mongo_async.warn
        cek = await warndb.find_one({"chat_id": chat_id, "action": action})
        if cek:
            await warndb.update_one(
                {"chat_id": chat_id},
                {
                    "$set": {
                        "action": action,
                    },
                }
            )
        else:
            await warndb.insert_one(
                {
                    "chat_id": chat_id,
                    "action": action,
                }
            )

    async def get_warns(self: "fipper.Client", chat_id, user_id):
        warndb = self.mongo_async.warn
        r = await warndb.find_one({"chat_id": chat_id, "user_id": user_id})
        if r:
            try:
                num = r['number']
                return num
            except KeyError:
                return 0
        else:
            return 0

    async def add_warns(self: "fipper.Client", chat_id, user_id, number):
        warndb = self.mongo_async.warn
        cek = await warndb.find_one({"chat_id": chat_id, "user_id": user_id})
        if cek:
            await warndb.update_one(
                {"chat_id": chat_id},
                {
                    "$set": {
                        "user_id": user_id,
                        "number": number,
                    },
                }
            )
        else:
            await warndb.insert_one(
                {
                    "chat_id": chat_id,
                    "user_id": user_id,
                    "number": number,
                }
            )

    async def reset_warns(self: "fipper.Client", chat_id, user_id):
        warndb = self.mongo_async.warn
        res = await warndb.find_one({"chat_id": chat_id, "user_id": user_id})
        if res:
            await warndb.delete_one({"user_id": user_id})

    async def get_warns_limit(self: "fipper.Client", chat_id):
        warndb = self.mongo_async.warn
        res = await warndb.find_one({"chat_id": chat_id})
        if res:
            try:
                limit = res['warns_limit']
                return limit
            except KeyError:
                return 3
        else:
            return 3

    async def set_warns_limit(self: "fipper.Client", chat_id, warns_limit):
        warndb = self.mongo_async.warn
        cek = await warndb.find_one({"chat_id": chat_id, "warns_limit": warns_limit})
        if cek:
            await warndb.update_one(
                {"chat_id": chat_id},
                {
                    "$set": {
                        "warns_limit": warns_limit,
                    },
                }
            )
        else:
            await warndb.insert_one(
                {
                    "chat_id": chat_id,
                    "warns_limit": warns_limit,
                }
            )

    async def all_warns(self: "fipper.Client", chat_id):
        warndb = self.mongo_async.warn
        r = [jo async for jo in warndb.find({"chat_id": chat_id})]
        if r:
            return r
        else:
            return []
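

# Minimal usage sketch (not part of the original module). `Warn` is mixed
# into `fipper.Client`, so the methods are awaited on a connected client;
# the chat and user ids below are placeholders.
#
#     warns = await client.get_warns(chat_id, user_id)
#     await client.add_warns(chat_id, user_id, warns + 1)
#     if warns + 1 >= await client.get_warns_limit(chat_id):
#         await client.reset_warns(chat_id, user_id)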
/ACNLogger2-1.1.0-py3-none-any.whl/ACNLogger/__init__.py
import logging
import time
import os
import sys
import inspect

from html import escape


class ACNLogger:

    def check(self, message):
        if self.ENV == "DEV":
            return message
        else:
            newstr = message.replace("./", "")
            newstr = newstr.replace("..", "")
            return escape(newstr)

    def debug(self, message):
        self.logger.debug("["+os.path.basename(inspect.stack()[1].filename)+"] ["+self.ENV+"] ["+self.oid+"] ["+self.session+"] ["+self.correlationId+"] " + self.check(message))

    def info(self, message):
        self.logger.info("["+os.path.basename(inspect.stack()[1].filename)+"] ["+self.ENV+"] ["+self.oid+"] ["+self.session+"] ["+self.correlationId+"] " + self.check(message))

    def warning(self, message):
        self.logger.warning("["+os.path.basename(inspect.stack()[1].filename)+"] ["+self.ENV+"] ["+self.oid+"] ["+self.session+"] ["+self.correlationId+"] " + self.check(message))

    def error(self, e):
        self.logger.error("["+os.path.basename(inspect.stack()[1].filename)+"] ["+self.ENV+"] ["+self.oid+"] ["+self.session+"] ["+self.correlationId+"] " + self.check(str(e)))

    def critical(self, e):
        self.logger.critical("["+os.path.basename(inspect.stack()[1].filename)+"] ["+self.ENV+"] ["+self.oid+"] ["+self.session+"] ["+self.correlationId+"] " + self.check(str(e)))

    def exception(self, e):
        if self.ENV != "PRO":
            self.logger.exception("["+os.path.basename(inspect.stack()[1].filename)+"] ["+self.ENV+"] ["+self.oid+"] ["+self.session+"] ["+self.correlationId+"] " + self.check(str(e)))
        else:
            self.error(e)

    def setSession(self, session):
        self.session = session

    def setCorrelationId(self, correlationId):
        self.correlationId = correlationId

    def setOId(self, oid):
        self.oid = oid

    def __init__(self, name, file=None, console_level="debug", logfile_level="debug"):
        # file = file or name+".log"

        _logLevelMap = {
            "debug": logging.DEBUG,
            "info": logging.INFO,
            "warning": logging.WARNING,
            "error": logging.ERROR,
            "critical": logging.CRITICAL
        }

        acn_logger = logging.getLogger(name)  # Creating the new logger
        acn_logger.setLevel(logging.DEBUG)  # Setting new logger level to INFO or above
        acn_logger.propagate = False

        console_handler = logging.StreamHandler()
        console_handler.setLevel(_logLevelMap[console_level])

        #file_handler = logging.FileHandler(file)
        #file_handler.setLevel(_logLevelMap[logfile_level])

        #acn_logger.addHandler(file_handler)  # Adding file handler to the new logger
        acn_logger.addHandler(console_handler)

        formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] [%(process)d] %(message)s')  # Creating a formatter

        #file_handler.setFormatter(formatter)  # Setting handler format
        console_handler.setFormatter(formatter)

        self.session = "UNDEFINED"
        self.correlationId = "UNDEFINED"
        self.oid = "UNDEFINED"

        self.logger = acn_logger

        try:
            self.ENV = os.environ["ENV"]
        except:
            self.ENV = "ENV NOT SET"
            self.warning("Environment variable ENV not set")

        self.info("STARTING MICROSERVICE")
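

# Minimal usage sketch (not part of the original module); the service name
# and ids are placeholders.
if __name__ == "__main__":
    log = ACNLogger("demo-service")
    log.setSession("session-123")
    log.setCorrelationId("corr-456")
    log.info("hello from ACNLogger")
    try:
        1 / 0
    except Exception as e:
        log.exception(e)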
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/@types/node/zlib.d.ts
declare module 'zlib' {
    import * as stream from 'node:stream';
    interface ZlibOptions {
        /** @default constants.Z_NO_FLUSH */
        flush?: number | undefined;
        /** @default constants.Z_FINISH */
        finishFlush?: number | undefined;
        /** @default 16*1024 */
        chunkSize?: number | undefined;
        windowBits?: number | undefined;
        level?: number | undefined; // compression only
        memLevel?: number | undefined; // compression only
        strategy?: number | undefined; // compression only
        dictionary?: NodeJS.ArrayBufferView | ArrayBuffer | undefined; // deflate/inflate only, empty dictionary by default
        info?: boolean | undefined;
        maxOutputLength?: number | undefined;
    }
    interface BrotliOptions {
        /** @default constants.BROTLI_OPERATION_PROCESS */
        flush?: number | undefined;
        /** @default constants.BROTLI_OPERATION_FINISH */
        finishFlush?: number | undefined;
        /** @default 16*1024 */
        chunkSize?: number | undefined;
        params?:
            | {
                  /** Each key is a `constants.BROTLI_*` constant. */
                  [key: number]: boolean | number;
              }
            | undefined;
        maxOutputLength?: number | undefined;
    }
    interface Zlib {
        /** @deprecated Use bytesWritten instead. */
        readonly bytesRead: number;
        readonly bytesWritten: number;
        shell?: boolean | string | undefined;
        close(callback?: () => void): void;
        flush(kind?: number, callback?: () => void): void;
        flush(callback?: () => void): void;
    }
    interface ZlibParams {
        params(level: number, strategy: number, callback: () => void): void;
    }
    interface ZlibReset {
        reset(): void;
    }
    interface BrotliCompress extends stream.Transform, Zlib {}
    interface BrotliDecompress extends stream.Transform, Zlib {}
    interface Gzip extends stream.Transform, Zlib {}
    interface Gunzip extends stream.Transform, Zlib {}
    interface Deflate extends stream.Transform, Zlib, ZlibReset, ZlibParams {}
    interface Inflate extends stream.Transform, Zlib, ZlibReset {}
    interface DeflateRaw extends stream.Transform, Zlib, ZlibReset, ZlibParams {}
    interface InflateRaw extends stream.Transform, Zlib, ZlibReset {}
    interface Unzip extends stream.Transform, Zlib {}
    /**
     * Creates and returns a new `BrotliCompress` object.
     * @since v11.7.0, v10.16.0
     */
    function createBrotliCompress(options?: BrotliOptions): BrotliCompress;
    /**
     * Creates and returns a new `BrotliDecompress` object.
     * @since v11.7.0, v10.16.0
     */
    function createBrotliDecompress(options?: BrotliOptions): BrotliDecompress;
    /**
     * Creates and returns a new `Gzip` object.
     * See `example`.
     * @since v0.5.8
     */
    function createGzip(options?: ZlibOptions): Gzip;
    /**
     * Creates and returns a new `Gunzip` object.
     * @since v0.5.8
     */
    function createGunzip(options?: ZlibOptions): Gunzip;
    /**
     * Creates and returns a new `Deflate` object.
     * @since v0.5.8
     */
    function createDeflate(options?: ZlibOptions): Deflate;
    /**
     * Creates and returns a new `Inflate` object.
     * @since v0.5.8
     */
    function createInflate(options?: ZlibOptions): Inflate;
    /**
     * Creates and returns a new `DeflateRaw` object.
     *
     * An upgrade of zlib from 1.2.8 to 1.2.11 changed behavior when `windowBits`
     * is set to 8 for raw deflate streams. zlib would automatically set `windowBits`
     * to 9 if it was initially set to 8. Newer versions of zlib will throw an exception,
     * so Node.js restored the original behavior of upgrading a value of 8 to 9,
     * since passing `windowBits = 9` to zlib actually results in a compressed stream
     * that effectively uses an 8-bit window only.
     * @since v0.5.8
     */
    function createDeflateRaw(options?: ZlibOptions): DeflateRaw;
    /**
     * Creates and returns a new `InflateRaw` object.
     * @since v0.5.8
     */
    function createInflateRaw(options?: ZlibOptions): InflateRaw;
    /**
     * Creates and returns a new `Unzip` object.
     * @since v0.5.8
     */
    function createUnzip(options?: ZlibOptions): Unzip;
    type InputType = string | ArrayBuffer | NodeJS.ArrayBufferView;
    type CompressCallback = (error: Error | null, result: Buffer) => void;
    /** @since v11.7.0, v10.16.0 */
    function brotliCompress(buf: InputType, options: BrotliOptions, callback: CompressCallback): void;
    function brotliCompress(buf: InputType, callback: CompressCallback): void;
    namespace brotliCompress {
        function __promisify__(buffer: InputType, options?: BrotliOptions): Promise<Buffer>;
    }
    /**
     * Compress a chunk of data with `BrotliCompress`.
     * @since v11.7.0, v10.16.0
     */
    function brotliCompressSync(buf: InputType, options?: BrotliOptions): Buffer;
    /** @since v11.7.0, v10.16.0 */
    function brotliDecompress(buf: InputType, options: BrotliOptions, callback: CompressCallback): void;
    function brotliDecompress(buf: InputType, callback: CompressCallback): void;
    namespace brotliDecompress {
        function __promisify__(buffer: InputType, options?: BrotliOptions): Promise<Buffer>;
    }
    /**
     * Decompress a chunk of data with `BrotliDecompress`.
     * @since v11.7.0, v10.16.0
     */
    function brotliDecompressSync(buf: InputType, options?: BrotliOptions): Buffer;
    /** @since v0.6.0 */
    function deflate(buf: InputType, callback: CompressCallback): void;
    function deflate(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace deflate {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Compress a chunk of data with `Deflate`.
     * @since v0.11.12
     */
    function deflateSync(buf: InputType, options?: ZlibOptions): Buffer;
    /** @since v0.6.0 */
    function deflateRaw(buf: InputType, callback: CompressCallback): void;
    function deflateRaw(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace deflateRaw {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Compress a chunk of data with `DeflateRaw`.
     * @since v0.11.12
     */
    function deflateRawSync(buf: InputType, options?: ZlibOptions): Buffer;
    /** @since v0.6.0 */
    function gzip(buf: InputType, callback: CompressCallback): void;
    function gzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace gzip {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Compress a chunk of data with `Gzip`.
     * @since v0.11.12
     */
    function gzipSync(buf: InputType, options?: ZlibOptions): Buffer;
    /** @since v0.6.0 */
    function gunzip(buf: InputType, callback: CompressCallback): void;
    function gunzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace gunzip {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Decompress a chunk of data with `Gunzip`.
     * @since v0.11.12
     */
    function gunzipSync(buf: InputType, options?: ZlibOptions): Buffer;
    /** @since v0.6.0 */
    function inflate(buf: InputType, callback: CompressCallback): void;
    function inflate(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace inflate {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Decompress a chunk of data with `Inflate`.
     * @since v0.11.12
     */
    function inflateSync(buf: InputType, options?: ZlibOptions): Buffer;
    /** @since v0.6.0 */
    function inflateRaw(buf: InputType, callback: CompressCallback): void;
    function inflateRaw(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace inflateRaw {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Decompress a chunk of data with `InflateRaw`.
     * @since v0.11.12
     */
    function inflateRawSync(buf: InputType, options?: ZlibOptions): Buffer;
    /** @since v0.6.0 */
    function unzip(buf: InputType, callback: CompressCallback): void;
    function unzip(buf: InputType, options: ZlibOptions, callback: CompressCallback): void;
    namespace unzip {
        function __promisify__(buffer: InputType, options?: ZlibOptions): Promise<Buffer>;
    }
    /**
     * Decompress a chunk of data with `Unzip`.
     * @since v0.11.12
     */
    function unzipSync(buf: InputType, options?: ZlibOptions): Buffer;
    namespace constants {
        const BROTLI_DECODE: number;
        const BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: number;
        const BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: number;
        const BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: number;
        const BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: number;
        const BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: number;
        const BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: number;
        const BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: number;
        const BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: number;
        const BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: number;
        const BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: number;
        const BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: number;
        const BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: number;
        const BROTLI_DECODER_ERROR_FORMAT_DISTANCE: number;
        const BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: number;
        const BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: number;
        const BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: number;
        const BROTLI_DECODER_ERROR_FORMAT_PADDING_1: number;
        const BROTLI_DECODER_ERROR_FORMAT_PADDING_2: number;
        const BROTLI_DECODER_ERROR_FORMAT_RESERVED: number;
        const BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: number;
        const BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: number;
        const BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: number;
        const BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: number;
        const BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: number;
        const BROTLI_DECODER_ERROR_UNREACHABLE: number;
        const BROTLI_DECODER_NEEDS_MORE_INPUT: number;
        const BROTLI_DECODER_NEEDS_MORE_OUTPUT: number;
        const BROTLI_DECODER_NO_ERROR: number;
        const BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: number;
        const BROTLI_DECODER_PARAM_LARGE_WINDOW: number;
        const BROTLI_DECODER_RESULT_ERROR: number;
        const BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: number;
        const BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: number;
        const BROTLI_DECODER_RESULT_SUCCESS: number;
        const BROTLI_DECODER_SUCCESS: number;
        const BROTLI_DEFAULT_MODE: number;
        const BROTLI_DEFAULT_QUALITY: number;
        const BROTLI_DEFAULT_WINDOW: number;
        const BROTLI_ENCODE: number;
        const BROTLI_LARGE_MAX_WINDOW_BITS: number;
        const BROTLI_MAX_INPUT_BLOCK_BITS: number;
        const BROTLI_MAX_QUALITY: number;
        const BROTLI_MAX_WINDOW_BITS: number;
        const BROTLI_MIN_INPUT_BLOCK_BITS: number;
        const BROTLI_MIN_QUALITY: number;
        const BROTLI_MIN_WINDOW_BITS: number;
        const BROTLI_MODE_FONT: number;
        const BROTLI_MODE_GENERIC: number;
        const BROTLI_MODE_TEXT: number;
        const BROTLI_OPERATION_EMIT_METADATA: number;
        const BROTLI_OPERATION_FINISH: number;
        const BROTLI_OPERATION_FLUSH: number;
        const BROTLI_OPERATION_PROCESS: number;
        const BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: number;
        const BROTLI_PARAM_LARGE_WINDOW: number;
        const BROTLI_PARAM_LGBLOCK: number;
        const BROTLI_PARAM_LGWIN: number;
        const BROTLI_PARAM_MODE: number;
        const BROTLI_PARAM_NDIRECT: number;
        const BROTLI_PARAM_NPOSTFIX: number;
        const BROTLI_PARAM_QUALITY: number;
        const BROTLI_PARAM_SIZE_HINT: number;
        const DEFLATE: number;
        const DEFLATERAW: number;
        const GUNZIP: number;
        const GZIP: number;
        const INFLATE: number;
        const INFLATERAW: number;
        const UNZIP: number;
        // Allowed flush values.
        const Z_NO_FLUSH: number;
        const Z_PARTIAL_FLUSH: number;
        const Z_SYNC_FLUSH: number;
        const Z_FULL_FLUSH: number;
        const Z_FINISH: number;
        const Z_BLOCK: number;
        const Z_TREES: number;
        // Return codes for the compression/decompression functions.
        // Negative values are errors, positive values are used for special but normal events.
        const Z_OK: number;
        const Z_STREAM_END: number;
        const Z_NEED_DICT: number;
        const Z_ERRNO: number;
        const Z_STREAM_ERROR: number;
        const Z_DATA_ERROR: number;
        const Z_MEM_ERROR: number;
        const Z_BUF_ERROR: number;
        const Z_VERSION_ERROR: number;
        // Compression levels.
        const Z_NO_COMPRESSION: number;
        const Z_BEST_SPEED: number;
        const Z_BEST_COMPRESSION: number;
        const Z_DEFAULT_COMPRESSION: number;
        // Compression strategy.
        const Z_FILTERED: number;
        const Z_HUFFMAN_ONLY: number;
        const Z_RLE: number;
        const Z_FIXED: number;
        const Z_DEFAULT_STRATEGY: number;
        const Z_DEFAULT_WINDOWBITS: number;
        const Z_MIN_WINDOWBITS: number;
        const Z_MAX_WINDOWBITS: number;
        const Z_MIN_CHUNK: number;
        const Z_MAX_CHUNK: number;
        const Z_DEFAULT_CHUNK: number;
        const Z_MIN_MEMLEVEL: number;
        const Z_MAX_MEMLEVEL: number;
        const Z_DEFAULT_MEMLEVEL: number;
        const Z_MIN_LEVEL: number;
        const Z_MAX_LEVEL: number;
        const Z_DEFAULT_LEVEL: number;
        const ZLIB_VERNUM: number;
    }
    // Allowed flush values.
    /** @deprecated Use `constants.Z_NO_FLUSH` */
    const Z_NO_FLUSH: number;
    /** @deprecated Use `constants.Z_PARTIAL_FLUSH` */
    const Z_PARTIAL_FLUSH: number;
    /** @deprecated Use `constants.Z_SYNC_FLUSH` */
    const Z_SYNC_FLUSH: number;
    /** @deprecated Use `constants.Z_FULL_FLUSH` */
    const Z_FULL_FLUSH: number;
    /** @deprecated Use `constants.Z_FINISH` */
    const Z_FINISH: number;
    /** @deprecated Use `constants.Z_BLOCK` */
    const Z_BLOCK: number;
    /** @deprecated Use `constants.Z_TREES` */
    const Z_TREES: number;
    // Return codes for the compression/decompression functions.
    // Negative values are errors, positive values are used for special but normal events.
    /** @deprecated Use `constants.Z_OK` */
    const Z_OK: number;
    /** @deprecated Use `constants.Z_STREAM_END` */
    const Z_STREAM_END: number;
    /** @deprecated Use `constants.Z_NEED_DICT` */
    const Z_NEED_DICT: number;
    /** @deprecated Use `constants.Z_ERRNO` */
    const Z_ERRNO: number;
    /** @deprecated Use `constants.Z_STREAM_ERROR` */
    const Z_STREAM_ERROR: number;
    /** @deprecated Use `constants.Z_DATA_ERROR` */
    const Z_DATA_ERROR: number;
    /** @deprecated Use `constants.Z_MEM_ERROR` */
    const Z_MEM_ERROR: number;
    /** @deprecated Use `constants.Z_BUF_ERROR` */
    const Z_BUF_ERROR: number;
    /** @deprecated Use `constants.Z_VERSION_ERROR` */
    const Z_VERSION_ERROR: number;
    // Compression levels.
    /** @deprecated Use `constants.Z_NO_COMPRESSION` */
    const Z_NO_COMPRESSION: number;
    /** @deprecated Use `constants.Z_BEST_SPEED` */
    const Z_BEST_SPEED: number;
    /** @deprecated Use `constants.Z_BEST_COMPRESSION` */
    const Z_BEST_COMPRESSION: number;
    /** @deprecated Use `constants.Z_DEFAULT_COMPRESSION` */
    const Z_DEFAULT_COMPRESSION: number;
    // Compression strategy.
    /** @deprecated Use `constants.Z_FILTERED` */
    const Z_FILTERED: number;
    /** @deprecated Use `constants.Z_HUFFMAN_ONLY` */
    const Z_HUFFMAN_ONLY: number;
    /** @deprecated Use `constants.Z_RLE` */
    const Z_RLE: number;
    /** @deprecated Use `constants.Z_FIXED` */
    const Z_FIXED: number;
    /** @deprecated Use `constants.Z_DEFAULT_STRATEGY` */
    const Z_DEFAULT_STRATEGY: number;
    /** @deprecated */
    const Z_BINARY: number;
    /** @deprecated */
    const Z_TEXT: number;
    /** @deprecated */
    const Z_ASCII: number;
    /** @deprecated */
    const Z_UNKNOWN: number;
    /** @deprecated */
    const Z_DEFLATED: number;
}
declare module 'node:zlib' {
    export * from 'zlib';
}
/CustomPipeline-0.0.3-py3-none-any.whl/rpcore/rpobject.py
from __future__ import print_function

import sys

# Load and init colorama, used to color the output
from rplibs.colorama import init as init_colorama
from rplibs.colorama import Fore, Style
init_colorama()


class RPObject(object):

    """ This is the base class for every object in the render pipeline.
    It provides the functions debug, warn, error and fatal for classes
    which inherit from this object, including the name of the class when
    printing out the message. """

    _OUTPUT_LEVEL = 0
    _OUTPUT_LEVELS = ["debug", "warning", "error", "fatal"]

    @classmethod
    def set_output_level(cls, level):
        """ Sets the output level, messages with a level below will not be
        printed. E.g. if you set the output level to "error", only error and
        fatal messages will be shown. """
        assert level in RPObject._OUTPUT_LEVELS
        RPObject._OUTPUT_LEVEL = RPObject._OUTPUT_LEVELS.index(level)

    @staticmethod
    def global_debug(context, *args, **kwargs):
        """ This method can be used from a static context to print a debug
        message. The first argument should be the name of the object / context,
        all other arguments should be the message. """
        if RPObject._OUTPUT_LEVEL > 0:
            return
        print(kwargs.get("color", Fore.GREEN) + "[>] " + context.ljust(25) +
              " " + Style.RESET_ALL + Fore.WHITE +
              ' '.join([str(i) for i in args]),
              Fore.RESET + Style.RESET_ALL)

    @staticmethod
    def global_warn(context, *args):
        """ This method can be used from a static context to print a warning.
        The first argument should be the name of the object / context,
        all other arguments should be the message. """
        if RPObject._OUTPUT_LEVEL > 1:
            return
        print(Fore.YELLOW + Style.BRIGHT + "[!] " + context.ljust(25) +
              Fore.YELLOW + Style.BRIGHT + " " +
              ' '.join([str(i) for i in args]) +
              Fore.RESET + Style.RESET_ALL)

    @staticmethod
    def global_error(context, *args):
        """ This method can be used from a static context to print an error.
        The first argument should be the name of the object / context,
        all other arguments should be the message. """
        if RPObject._OUTPUT_LEVEL > 2:
            return
        print(Fore.RED + Style.BRIGHT + "\n[!!!] " + context.ljust(23) + " " +
              ' '.join([str(i) for i in args]) + "\n" +
              Fore.RESET + Style.RESET_ALL)

    def __init__(self, name=None):
        """ Initiates the RPObject with a given name. The name should be
        representative about the class. If no name is given, the classname
        is used """
        if name is None:
            name = self.__class__.__name__
        self._debug_name = name
        self._debug_color = Fore.GREEN

    def _set_debug_color(self, color, style=None):
        """ Sets the color used to output debug messages """
        self._debug_color = getattr(Fore, color.upper())
        if style:
            self._debug_color += getattr(Style, style.upper())

    @property
    def debug_name(self):
        """ Returns the name of the debug object """
        return self._debug_name

    @debug_name.setter
    def debug_name(self, name):
        """ Renames this object """
        self._debug_name = name

    def debug(self, *args):
        """ Outputs a debug message, something that is not necessarry
        but provides useful information for the developer """
        self.global_debug(self._debug_name, *args, color=self._debug_color)

    def warn(self, *args):
        """ Outputs a warning message, something that failed or does not
        work, but does not prevent the program from running """
        self.global_warn(self._debug_name, *args)

    def error(self, *args):
        """ Outputs an error message, something really serious.
        Hopefully this never get's called! """
        self.global_error(self._debug_name, *args)

    def fatal(self, *args):
        """ Outputs a fatal error message, printing out the errors and then
        calling sys.exit to terminate the program. This method should be
        called when something failed so hard that the pipeline has to exit. """
        # We have to set output level to 0 here, so we can print out errors
        RPObject._OUTPUT_LEVEL = 0
        self.error(*args)
        sys.exit(0)

    def __repr__(self):
        """ Represents this object. Subclasses can override this """
        return self._debug_name
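

# Minimal usage sketch (not part of the original module): subclass RPObject
# to get named, colored log output. Running it requires the package's
# bundled rplibs.colorama to be importable.
if __name__ == "__main__":
    class DemoStage(RPObject):
        def run(self):
            self.debug("starting demo stage")
            self.warn("this is only a sketch")

    RPObject.set_output_level("debug")
    DemoStage().run()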
/Flask-Scaffold-0.5.1.tar.gz/Flask-Scaffold-0.5.1/app/templates/static/node_modules/angular-grid/src/ts/virtualDom/vHtmlElement.ts
module awk.vdom {

    var _ = awk.grid.Utils;

    export class VHtmlElement extends VElement {

        private type: string;
        private classes: string[];
        private eventListeners: VEventListener[];
        private attributes: {[key: string]: string};
        private children: VElement[];
        private innerHtml: string;
        private style = <any>{};
        private bound: boolean;
        private element: HTMLElement;

        constructor(type: string) {
            super();
            this.type = type;
        }

        public getElement(): HTMLElement {
            return this.element;
        }

        public setInnerHtml(innerHtml: string): void {
            if (this.bound) {
                this.element.innerHTML = innerHtml;
            } else {
                this.innerHtml = innerHtml;
            }
        }

        public addStyles(styles: any): void {
            if (!styles) {
                return;
            }
            if (!this.bound && !this.style) {
                this.style = {};
            }
            _.iterateObject(styles, (key: string, value: string) => {
                if (this.bound) {
                    var style = <any> this.element.style;
                    style[key] = value;
                } else {
                    this.style[key] = value;
                }
            });
        }

        private attachEventListeners(node: Node): void {
            if (!this.eventListeners) {
                return;
            }
            for (var i = 0; i < this.eventListeners.length; i++) {
                var listener = this.eventListeners[i];
                node.addEventListener(listener.event, listener.listener);
            }
        }

        public addClass(newClass: string): void {
            if (this.bound) {
                _.addCssClass(this.element, newClass);
            } else {
                if (!this.classes) {
                    this.classes = [];
                }
                this.classes.push(newClass);
            }
        }

        public removeClass(oldClass: string): void {
            if (this.bound) {
                _.removeCssClass(this.element, oldClass);
            } else {
                if (!this.classes) {
                    return;
                }
                while (this.classes.indexOf(oldClass) >= 0) {
                    _.removeFromArray(this.classes, oldClass);
                }
            }
        }

        public addClasses(classes: string[]): void {
            if (!classes || classes.length <= 0) {
                return;
            }
            if (this.bound) {
                for (var i = 0; i < classes.length; i++) {
                    _.addCssClass(this.element, classes[i]);
                }
            } else {
                if (!this.classes) {
                    this.classes = [];
                }
                for (var j = 0; j < classes.length; j++) {
                    this.classes.push(classes[j]);
                }
            }
        }

        public toHtmlString(): string {
            var buff = '';

            // opening element
            buff += '<' + this.type + ' v_element_id="' + this.getId() + '" ';
            buff += this.toHtmlStringClasses();
            buff += this.toHtmlStringAttributes();
            buff += this.toHtmlStringStyles();
            buff += '>';

            // contents
            if (this.innerHtml) {
                buff += this.innerHtml;
            }
            buff += this.toHtmlStringChildren();

            // closing element
            buff += '</' + this.type + '>';

            return buff;
        }

        private toHtmlStringChildren(): string {
            if (!this.children) {
                return '';
            }
            var result = '';
            for (var i = 0; i < this.children.length; i++) {
                result += this.children[i].toHtmlString();
            }
            return result;
        }

        private toHtmlStringAttributes(): string {
            if (!this.attributes) {
                return '';
            }
            var result = '';
            _.iterateObject(this.attributes, (key: string, value: string) => {
                result += ' ' + key + '="' + value + '"';
            });
            return result;
        }

        private toHtmlStringClasses(): string {
            if (!this.classes) {
                return '';
            }
            return ' class="' + this.classes.join(' ') + '"';
        }

        private toHtmlStringStyles(): string {
            var result = ' style="';
            var atLeastOne = false;
            _.iterateObject(this.style, (key: string, value: string) => {
                result += ' ' + key + ': ' + value + ';';
                atLeastOne = true;
            });
            result += '"';
            if (atLeastOne) {
                return result;
            } else {
                return '';
            }
        }

        public appendChild(child: any) {
            if (this.bound) {
                if (_.isNodeOrElement(child)) {
                    this.element.appendChild(child);
                } else {
                    console.error('cannot appendChild with virtual child to already bound VHTMLElement');
                }
            } else {
                if (!this.children) {
                    this.children = [];
                }
                if (_.isNodeOrElement(child)) {
                    this.children.push(new VWrapperElement(child));
                } else {
                    this.children.push(child);
                }
            }
        }

        public setAttribute(key: string, value: string) {
            if (this.bound) {
                console.error('cannot setAttribute to already bound VHTMLElement');
            }
            if (!this.attributes) {
                this.attributes = {};
            }
            this.attributes[key] = value;
        }

        public addEventListener(event: string, listener: EventListener) {
            if (this.bound) {
                console.error('cannot addEventListener to already bound VHTMLElement');
            }
            if (!this.eventListeners) {
                this.eventListeners = [];
            }
            var entry = new VEventListener(event, listener);
            this.eventListeners.push(entry);
        }

        public elementAttached(element: Element): void {
            super.elementAttached(element);
            this.element = <HTMLElement> element;
            this.attachEventListeners(element);
            this.fireElementAttachedToChildren(element);
            this.bound = true;
        }

        public fireElementAttachedToChildren(element: Element) {
            if (!this.children) {
                return;
            }
            for (var i = 0; i < this.children.length; i++) {
                var child = this.children[i];
                var childElement = element.querySelector('[v_element_id="' + child.getId() + '"]');
                child.elementAttached(childElement);
            }
        }
    }

    class VEventListener {
        constructor(public event: string, public listener: EventListener) {}
    }
}
/FireWorks-2.0.3.tar.gz/FireWorks-2.0.3/fireworks/user_objects/firetasks/dataflow_tasks.py
__author__ = "Ivan Kondov"
__email__ = "[email protected]"
__copyright__ = "Copyright 2016, Karlsruhe Institute of Technology"

import sys

from fireworks import Firework
from fireworks.core.firework import FiretaskBase, FWAction
from fireworks.utilities.fw_serializers import load_object

if sys.version_info[0] > 2:
    basestring = str


class CommandLineTask(FiretaskBase):
    """
    A Firetask to execute external commands in a shell

    Required params:
        - command_spec (dict): a dictionary specification of the command
          (see below for details)

    Optional params:
        - inputs ([str]): list of labels, one for each input argument
        - outputs ([str]): list of labels, one for each output argument
        - chunk_number (int): the serial number of the firetask
          when it is part of a series generated by a ForeachTask

    command_spec = {
        'command': [str],  # mandatory
        label_1: dict_1,   # optional
        label_2: dict_2,   # optional
        ...
    }

    The 'command' is a representation of the command as to be used with the
    subprocess package. The optional keys label_1, label_2, etc. are the
    actual labels used in the inputs and outputs. The dictionaries dict_1,
    dict_2, etc. have the following schema:

    {
        'binding': {
            prefix: str or None,
            separator: str or None
        },
        'source': {
            'type': 'path' or 'data' or 'identifier'
                    or 'stdin' or 'stdout' or 'stderr' or None,
            'value': str or int or float
        },
        'target': {
            'type': 'path' or 'data' or 'identifier'
                    or 'stdin' or 'stdout' or 'stderr' or None,
            'value': str
        }
    }

    Remarks
    -------
    * If the 'type' in the 'source' field is 'data' the 'value' can be of
      types 'str', 'int' and 'float'.
    * When a *str* is found instead of *dict* for some 'source', for example
      'source': 'string', 'string' is replaced with spec['string'] which must
      be available and of the schema of the 'source'.
    * When a *str* is found instead of *dict* for some label, for example
      label: 'string', 'string' is replaced with spec['string'] which can be
      a dictionary with this schema or a list of such dictionaries.
    """

    _fw_name = "CommandLineTask"
    required_params = ["command_spec"]
    optional_params = ["inputs", "outputs", "chunk_number"]

    def run_task(self, fw_spec):
        cmd_spec = self["command_spec"]
        ilabels = self.get("inputs")
        olabels = self.get("outputs")
        if ilabels is None:
            ilabels = []
        else:
            assert isinstance(ilabels, list), '"inputs" must be a list'
        if olabels is None:
            olabels = []
        else:
            assert isinstance(olabels, list), '"outputs" must be a list'

        inputs = []
        outputs = []
        for ios, labels in zip([inputs, outputs], [ilabels, olabels]):
            # cmd_spec: {label: {{binding: {}}, {source: {}}, {target: {}}}}
            for label in labels:
                if isinstance(cmd_spec[label], basestring):
                    inp = []
                    for item in fw_spec[cmd_spec[label]]:
                        if "source" in item:
                            inp.append(item)
                        else:
                            inp.append({"source": item})
                else:
                    inp = {}
                    for key in ["binding", "source", "target"]:
                        if key in cmd_spec[label]:
                            item = cmd_spec[label][key]
                            if isinstance(item, basestring):
                                inp[key] = fw_spec[item]
                            elif isinstance(item, dict):
                                inp[key] = item
                            else:
                                raise ValueError
                ios.append(inp)
        command = cmd_spec["command"]

        outlist = self.command_line_tool(command, inputs, outputs)

        if len(outlist) > 0:
            if self.get("chunk_number") is not None:
                mod_spec = []
                if len(olabels) > 1:
                    assert len(olabels) == len(outlist)
                    for olab, out in zip(olabels, outlist):
                        for item in out:
                            mod_spec.append({"_push": {olab: item}})
                else:
                    for out in outlist:
                        mod_spec.append({"_push": {olabels[0]: out}})
                return FWAction(mod_spec=mod_spec)
            else:
                output_dict = {}
                for olab, out in zip(olabels, outlist):
                    output_dict[olab] = out
                return FWAction(update_spec=output_dict)
        else:
            return FWAction()

    @staticmethod
    def command_line_tool(command, inputs=None, outputs=None):
        """
        This function composes and executes a command from provided
        specifications.

        Required parameters:
            - command ([str]): the command as to be passed to subprocess.Popen

        Optional parameters:
            - inputs ([dict, [dict]]): list of the specifications for inputs;
              multiple inputs may be passed in one list of dictionaries
            - outputs ([dict]): list of the specifications for outputs

        Returns:
            - list of target dictionaries for each output:
              'target': {
                  'type': 'path' or 'data' or 'identifier'
                          or 'stdin' or 'stdout' or 'stderr' or None
                  'value': str
              }
              If outputs is None then an empty list is returned.
        """
        import os
        import uuid
        from shutil import copyfile
        from subprocess import PIPE, Popen

        def set_binding(arg):
            argstr = ""
            if "binding" in arg:
                if "prefix" in arg["binding"]:
                    argstr += arg["binding"]["prefix"]
                if "separator" in arg["binding"]:
                    argstr += arg["binding"]["separator"]
            return argstr

        arglist = command
        stdin = None
        stdout = None
        stderr = PIPE
        stdininp = None
        if inputs is not None:
            for inp in inputs:
                argl = inp if isinstance(inp, list) else [inp]
                for arg in argl:
                    argstr = set_binding(arg)
                    assert "source" in arg, 'input has no key "source"'
                    assert arg["source"]["type"] is not None and arg["source"]["value"] is not None
                    if "target" in arg:
                        assert arg["target"] is not None
                        assert arg["target"]["type"] == "stdin"
                        if arg["source"]["type"] == "path":
                            stdin = open(arg["source"]["value"])
                        elif arg["source"]["type"] == "data":
                            stdin = PIPE
                            stdininp = str(arg["source"]["value"]).encode()
                        else:
                            # filepad
                            raise NotImplementedError()
                    else:
                        if arg["source"]["type"] == "path":
                            argstr += arg["source"]["value"]
                        elif arg["source"]["type"] == "data":
                            argstr += str(arg["source"]["value"])
                        else:
                            # filepad
                            raise NotImplementedError()
                    if len(argstr) > 0:
                        arglist.append(argstr)

        if outputs is not None:
            for arg in outputs:
                if isinstance(arg, list):
                    arg = arg[0]
                argstr = set_binding(arg)
                assert "target" in arg
                assert arg["target"] is not None
                if arg["target"]["type"] == "path":
                    assert "value" in arg["target"]
                    assert len(arg["target"]["value"]) > 0
                    path = arg["target"]["value"]
                    if os.path.isdir(path):
                        path = os.path.join(path, str(uuid.uuid4()))
                        arg["target"]["value"] = path
                    if "source" in arg:
                        assert arg["source"] is not None
                        assert "type" in arg["source"]
                        if arg["source"]["type"] == "stdout":
                            stdout = open(path, "w")
                        elif arg["source"]["type"] == "stderr":
                            stderr = open(path, "w")
                        elif arg["source"]["type"] == "path":
                            pass
                        else:
                            argstr += path
                    else:
                        argstr += path
                elif arg["target"]["type"] == "data":
                    stdout = PIPE
                else:
                    # filepad
                    raise NotImplementedError()
                if len(argstr) > 0:
                    arglist.append(argstr)

        proc = Popen(arglist, stdin=stdin, stderr=stderr, stdout=stdout)
        res = proc.communicate(input=stdininp)
        if proc.returncode != 0:
            err = res[1] if len(res) > 1 else ""
            raise RuntimeError(err)

        retlist = []
        if outputs is not None:
            for output in outputs:
                if "source" in output and output["source"]["type"] == "path":
                    copyfile(output["source"]["value"], output["target"]["value"])
                if output["target"]["type"] == "data":
                    output["target"]["value"] = res[0].decode().strip()
                retlist.append(output["target"])
        return retlist


class ForeachTask(FiretaskBase):
    """
    This firetask branches the workflow creating parallel fireworks
    using FWAction: one firework for each element or each chunk from the
    *split* list. Each firework in this generated list contains the firetask
    specified in the *task* dictionary. If the number of chunks is specified
    the *split* list will be divided into this number of chunks and each
    chunk will be processed by one of the generated child fireworks.

    Required params:
        - task (dict): a dictionary version of the firetask
        - split (str): a label of an input list; it must be available both in
          the *inputs* list of the specified task and in the spec.

    Optional params:
        - number of chunks (int): if provided the *split* input list will be
          divided into this number of sublists and each will be processed by
          a separate child firework
    """

    _fw_name = "ForeachTask"
    required_params = ["task", "split"]
    optional_params = ["number of chunks"]

    def run_task(self, fw_spec):
        assert isinstance(self["split"], basestring), self["split"]
        assert isinstance(fw_spec[self["split"]], list)
        if isinstance(self["task"]["inputs"], list):
            assert self["split"] in self["task"]["inputs"]
        else:
            assert self["split"] == self["task"]["inputs"]

        split_field = fw_spec[self["split"]]
        lensplit = len(split_field)
        assert lensplit != 0, ("input to split is empty:", self["split"])

        nchunks = self.get("number of chunks")
        if not nchunks:
            nchunks = lensplit
        chunklen = lensplit // nchunks
        if lensplit % nchunks > 0:
            chunklen = chunklen + 1
        chunks = [split_field[i:i + chunklen] for i in range(0, lensplit, chunklen)]

        fireworks = []
        for index, chunk in enumerate(chunks):
            spec = fw_spec.copy()
            spec[self["split"]] = chunk
            task = load_object(self["task"])
            task["chunk_number"] = index
            name = f"{self._fw_name} {index}"
            fireworks.append(Firework(task, spec=spec, name=name))
        return FWAction(detours=fireworks)


class JoinDictTask(FiretaskBase):
    """combines specified spec fields into a dictionary"""

    _fw_name = "JoinDictTask"
    required_params = ["inputs", "output"]
    optional_params = ["rename"]

    def run_task(self, fw_spec):
        assert isinstance(self["output"], basestring)
        assert isinstance(self["inputs"], list)

        if self["output"] not in fw_spec:
            output = {}
        else:
            assert isinstance(fw_spec[self["output"]], dict)
            output = fw_spec[self["output"]]

        if self.get("rename"):
            assert isinstance(self.get("rename"), dict)
            rename = self.get("rename")
        else:
            rename = {}
        for item in self["inputs"]:
            if item in rename:
                output[self["rename"][item]] = fw_spec[item]
            else:
                output[item] = fw_spec[item]

        return FWAction(update_spec={self["output"]: output})


class JoinListTask(FiretaskBase):
    """combines specified spec fields into a list."""

    _fw_name = "JoinListTask"
    required_params = ["inputs", "output"]

    def run_task(self, fw_spec):
        assert isinstance(self["output"], basestring)
        assert isinstance(self["inputs"], list)
        if self["output"] not in fw_spec:
            output = []
        else:
            assert isinstance(fw_spec[self["output"]], list)
            output = fw_spec[self["output"]]

        for item in self["inputs"]:
            output.append(fw_spec[item])

        return FWAction(update_spec={self["output"]: output})


class ImportDataTask(FiretaskBase):
    """
    Update the spec with data from file in a nested dictionary at a position
    specified by a mapstring = maplist[0]/maplist[1]/...
    i.e. spec[maplist[0]][maplist[1]]... = data
    """

    _fw_name = "ImportDataTask"
    required_params = ["filename", "mapstring"]
    optional_params = []

    def run_task(self, fw_spec):
        import json
        import operator
        from functools import reduce

        import ruamel.yaml as yaml

        filename = self["filename"]
        mapstring = self["mapstring"]
        assert isinstance(filename, basestring)
        assert isinstance(mapstring, basestring)
        maplist = mapstring.split("/")

        fmt = filename.split(".")[-1]
        assert fmt in ["json", "yaml"]
        with open(filename) as inp:
            data = json.load(inp) if fmt == "json" else yaml.safe_load(inp)

        leaf = reduce(operator.getitem, maplist[:-1], fw_spec)
        if isinstance(data, dict):
            if maplist[-1] not in leaf:
                leaf[maplist[-1]] = data
            else:
                leaf[maplist[-1]].update(data)
        else:
            leaf[maplist[-1]] = data

        return FWAction(update_spec={maplist[0]: fw_spec[maplist[0]]})
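

# Minimal usage sketch (not part of the original module): a `command_spec`
# of the documented shape that pipes spec data through `cat` on a POSIX
# system and captures stdout back into the spec. The labels are illustrative;
# fireworks must be installed.
if __name__ == "__main__":
    task = CommandLineTask(
        command_spec={
            "command": ["cat"],
            "input data": {
                "source": {"type": "data", "value": "hello"},
                "target": {"type": "stdin"},
            },
            "output data": {
                "source": {"type": "stdout"},
                "target": {"type": "data"},
            },
        },
        inputs=["input data"],
        outputs=["output data"],
    )
    action = task.run_task(fw_spec={})
    # expected: {'output data': {'type': 'data', 'value': 'hello'}}
    print(action.update_spec)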
/LaptopControlPanel-0.1.0.tar.gz/LaptopControlPanel-0.1.0/setup_data.py
####################################################################################################

import os

####################################################################################################

def merge_include(src_lines, doc_path, included_rst_files=None):

    if included_rst_files is None:
        included_rst_files = {}

    text = ''
    for line in src_lines:
        if line.startswith('.. include::'):
            include_file_name = line.split('::')[-1].strip()
            if include_file_name not in included_rst_files:
                # print "include", include_file_name
                with open(os.path.join(doc_path, include_file_name)) as f:
                    included_rst_files[include_file_name] = True
                    text += merge_include(f.readlines(), doc_path, included_rst_files)
        else:
            text += line

    return text

####################################################################################################

# Utility function to read the README file.
# Used for the long_description.
def read(file_name):

    source_path = os.path.dirname(os.path.realpath(__file__))
    if os.path.basename(source_path) == 'tools':
        source_path = os.path.dirname(source_path)
    elif 'build/bdist' in source_path:
        source_path = source_path[:source_path.find('build/bdist')]
    absolut_file_name = os.path.join(source_path, file_name)
    doc_path = os.path.join(source_path, 'doc', 'sphinx', 'source')

    # Read and merge includes
    with open(absolut_file_name) as f:
        lines = f.readlines()
    text = merge_include(lines, doc_path)

    return text

####################################################################################################

long_description = read('README.txt')

####################################################################################################

setup_dict = dict(
    name='LaptopControlPanel',
    version='0.1.0',
    author='Fabrice Salvaire',
    author_email='[email protected]',
    description='A Control Panel for Lenovo Thinkpad Laptop',
    license="GPLv3",
    keywords="laptop control panel",
    url='https://github.com/FabriceSalvaire/LaptopControlPanel',
    scripts=[
        'bin/battery-control',
        'bin/battery-monitoring',
        'bin/laptop-control-panel',
        'bin/fan-calibration',
        'bin/round-robin-monitoring',
    ],
    packages=[
        'LaptopControlPanel',
        'LaptopControlPanel.GUI',
        'LaptopControlPanel.GUI.Widgets',
        'LaptopControlPanel.GUI.Pages',
        'LaptopControlPanel.GUI.Pages.ui',
        'LaptopControlPanel.GUI.ui',
        'LaptopControlPanel.Monitoring',
        'LaptopControlPanel.Config',
        'LaptopControlPanel.Acpi',
        'LaptopControlPanel.System',
        'LaptopControlPanel.Kernel',
        'LaptopControlPanel.DataBase',
        'LaptopControlPanel.Logging',
        'LaptopControlPanel.Tools',
        'LaptopControlPanel.Application',
    ],
    data_files=[
        ('share/LaptopControlPanel/icons', ['share/icons']),
    ],
    long_description=long_description,
    # cf. http://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        "Topic :: Scientific/Engineering",
        "Intended Audience :: Education",
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: GNU General Public License (GPL)",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
    ],
    # install_requires=[
    #     # pip install => Could not find any downloads that satisfy the requirement PyQt4>=4.9
    #     # 'PyQt4>=4.9',
    # ],
)

####################################################################################################
#
# End
#
####################################################################################################
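

# Minimal sketch (not part of the original file) of the setup.py that would
# consume this module; the import of `setup_dict` from this file is an
# assumption based on its name.
#
#     from distutils.core import setup
#     from setup_data import setup_dict
#
#     setup(**setup_dict)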
PypiClean
/GaitAnalysisToolKit-0.2.0.tar.gz/GaitAnalysisToolKit-0.2.0/docs/gait.rst
===========
Gait Module
===========

The :py:mod:`gaitanalysis.gait` module provides tools to process and analyze
typical data collected during the measurement of human locomotion (gait). In
general, the three-dimensional coordinates of a set of markers attached to
anatomical features of the human body are tracked over time. Secondly,
various analog signals are recorded: in particular, voltages proportional to
the forces and moments applied to one or two force plates, voltages from
electromyography (EMG) measurements, and/or accelerometers, etc. All of these
measurements are stored as discrete samples in time.
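
A minimal sketch of loading such sampled data is shown below; the file name
and column label are hypothetical and not part of the toolkit's API:

.. code-block:: python

    import pandas as pd

    data = pd.read_csv('walking_trial.csv', index_col='TimeStamp')
    sample_period = data.index[1] - data.index[0]  # discrete samples in time
    right_heel_z = data['RHEE.PosZ']  # one marker coordinate through time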
PypiClean
/11l-2021.3-py3-none-any.whl/python_to_11l/tokenizer.py
from typing import List, Tuple
Char = str

from enum import IntEnum

keywords = [ # https://docs.python.org/3/reference/lexical_analysis.html#keywords
    'False', 'await', 'else', 'import', 'pass',
    'None', 'break', 'except', 'in', 'raise',
    'True', 'class', 'finally', 'is', 'return',
    'and', 'continue', 'for', 'lambda', 'try',
    'as', 'def', 'from', 'nonlocal', 'while',
    'assert', 'del', 'global', 'not', 'with',
    'async', 'elif', 'if', 'or', 'yield',]

operators = [ # https://docs.python.org/3/reference/lexical_analysis.html#operators
    '+', '-', '*', '**', '/', '//', '%', '@',
    '<<', '>>', '&', '|', '^', '~',
    '<', '>', '<=', '>=', '==', '!=',]
#operators.sort(key = lambda x: len(x), reverse = True)

delimiters = [ # https://docs.python.org/3/reference/lexical_analysis.html#delimiters
    '(', ')', '[', ']', '{', '}',
    ',', ':', '.', ';', '@', '=', '->',
    '+=', '-=', '*=', '/=', '//=', '%=', '@=',
    '&=', '|=', '^=', '>>=', '<<=', '**=',]
#delimiters.sort(key = lambda x: len(x), reverse = True)

operators_and_delimiters = sorted(operators + delimiters, key = lambda x: len(x), reverse = True)

class Error(Exception):
    message : str
    pos : int
    end : int

    def __init__(self, message, pos):
        self.message = message
        self.pos = pos
        self.end = pos

class Token:
    class Category(IntEnum): # why ‘Category’: >[https://docs.python.org/3/reference/lexical_analysis.html#other-tokens]:‘the following categories of tokens exist’
        NAME = 0 # or IDENTIFIER
        KEYWORD = 1
        CONSTANT = 2
        OPERATOR_OR_DELIMITER = 3
        NUMERIC_LITERAL = 4
        STRING_LITERAL = 5
        INDENT = 6 # [https://docs.python.org/3/reference/lexical_analysis.html#indentation][-1]
        DEDENT = 7
        STATEMENT_SEPARATOR = 8

    start : int
    end : int
    category : Category

    def __init__(self, start, end, category):
        self.start = start
        self.end = end
        self.category = category

    def __repr__(self):
        return str(self.start)

    def value(self, source):
        return source[self.start:self.end]

    def to_str(self, source):
        return 'Token('+str(self.category)+', "'+self.value(source)+'")'

def tokenize(source, newline_chars : List[int] = None, comments : List[Tuple[int, int]] = None):
    tokens : List[Token] = []
    indentation_levels : List[int] = []
    nesting_elements : List[Tuple[Char, int]] = [] # parentheses, square brackets or curly braces

    begin_of_line = True
    expected_an_indented_block = False
    i = 0

    while i < len(source):
        if begin_of_line: # at the beginning of each line, the line's indentation level is compared to the last of the indentation_levels [:1]
            begin_of_line = False

            linestart = i
            indentation_level = 0
            while i < len(source):
                if source[i] == ' ':
                    indentation_level += 1
                elif source[i] == "\t":
                    indentation_level += 8 # consider tab as just 8 spaces (I know that Python 3 uses different rules, but I disagree with the Python 3 approach ([-1]:‘Tabs are replaced (from left to right) by one to eight spaces’), so I decided to use this simpler solution)
                else:
                    break
                i += 1
            if i == len(source): # end of source
                break

            if source[i] in "\r\n#": # lines with only whitespace and/or comments do not affect the indentation
                continue

            prev_indentation_level = indentation_levels[-1] if len(indentation_levels) else 0

            if expected_an_indented_block:
                if not indentation_level > prev_indentation_level:
                    raise Error('expected an indented block', i)

            if indentation_level == prev_indentation_level: # [1:] [-1]:‘If it is equal, nothing happens.’ [:2]
                if len(tokens):
                    tokens.append(Token(linestart-1, linestart, Token.Category.STATEMENT_SEPARATOR))
            elif indentation_level > prev_indentation_level: # [2:] [-1]:‘If it is larger, it is pushed on the stack, and one INDENT token is generated.’ [:3]
                if not expected_an_indented_block:
                    raise Error('unexpected indent', i)
                expected_an_indented_block = False
                indentation_levels.append(indentation_level)
                tokens.append(Token(linestart, i, Token.Category.INDENT))
            else: # [3:] [-1]:‘If it is smaller, it ~‘must’ be one of the numbers occurring on the stack; all numbers on the stack that are larger are popped off, and for each number popped off a DEDENT token is generated.’ [:4]
                while True:
                    indentation_levels.pop()
                    tokens.append(Token(i, i, Token.Category.DEDENT))
                    level = indentation_levels[-1] if len(indentation_levels) else 0
                    if level == indentation_level:
                        break
                    if level < indentation_level:
                        raise Error('unindent does not match any outer indentation level', i)

        ch = source[i]

        if ch in " \t":
            i += 1 # just skip whitespace characters
        elif ch in "\r\n":
            if newline_chars is not None:
                newline_chars.append(i)
            i += 1
            if ch == "\r" and source[i:i+1] == "\n":
                i += 1
            if len(nesting_elements) == 0: # [https://docs.python.org/3/reference/lexical_analysis.html#implicit-line-joining ‘Implicit line joining’]:‘Expressions in parentheses, square brackets or curly braces can be split over more than one physical line without using backslashes.’
                begin_of_line = True
        elif ch == '#':
            comment_start = i
            i += 1
            while i < len(source) and source[i] not in "\r\n":
                i += 1
            if comments is not None:
                comments.append((comment_start, i))
        else:
            expected_an_indented_block = ch == ':'

            operator_or_delimiter = ''
            for op in operators_and_delimiters:
                if source[i:i+len(op)] == op:
                    operator_or_delimiter = op
                    break

            lexem_start = i
            i += 1
            category : Token.Category

            if operator_or_delimiter != '':
                i = lexem_start + len(operator_or_delimiter)
                category = Token.Category.OPERATOR_OR_DELIMITER
                if ch in '([{':
                    nesting_elements.append((ch, lexem_start))
                elif ch in ')]}': # ([{
                    if len(nesting_elements) == 0 or nesting_elements[-1][0] != {')':'(', ']':'[', '}':'{'}[ch]: # }])
                        raise Error('there is no corresponding opening parenthesis/bracket/brace for `' + ch + '`', lexem_start)
                    nesting_elements.pop()
                elif ch == ';':
                    category = Token.Category.STATEMENT_SEPARATOR
            elif ch in ('"', "'") or (ch in 'rRbB' and source[i:i+1] in ('"', "'")):
                ends : str
                if ch in 'rRbB':
                    ends = source[i:i+3] if source[i:i+3] in ('"""', "'''") else source[i]
                else:
                    i -= 1
                    ends = source[i:i+3] if source[i:i+3] in ('"""', "'''") else ch
                i += len(ends)
                while True:
                    if i == len(source):
                        raise Error('unclosed string literal', lexem_start)
                    if source[i] == '\\':
                        i += 1
                        if i == len(source):
                            continue
                    elif source[i:i+len(ends)] == ends:
                        i += len(ends)
                        break
                    i += 1
                category = Token.Category.STRING_LITERAL
            elif ch.isalpha() or ch == '_': # this is NAME/IDENTIFIER or KEYWORD
                while i < len(source):
                    ch = source[i]
                    if not (ch.isalpha() or ch == '_' or '0' <= ch <= '9' or ch == '?'):
                        break
                    i += 1
                if source[lexem_start:i] in keywords:
                    if source[lexem_start:i] in ('None', 'False', 'True'):
                        category = Token.Category.CONSTANT
                    else:
                        category = Token.Category.KEYWORD
                else:
                    category = Token.Category.NAME
            elif (ch in '-+' and '0' <= source[i:i+1] <= '9') or '0' <= ch <= '9': # this is NUMERIC_LITERAL
                if ch in '-+':
                    assert(False) # considering sign as a part of numeric literal is a bad idea — expressions like `j-3` cease to parse correctly
                    #sign = ch
                    ch = source[i+1]
                else:
                    i -= 1
                is_hex = ch == '0' and source[i+1:i+2] in ('x', 'X')
                is_oct = ch == '0' and source[i+1:i+2] in ('o', 'O')
                is_bin = ch == '0' and source[i+1:i+2] in ('b', 'B')
                if is_hex or is_oct or is_bin:
                    i += 2
                    # if not '0' <= source[i:i+1] <= '9':
                    #     raise Error('expected digit', i)
                start = i
                i += 1
                if is_hex:
                    while i < len(source) and ('0' <= source[i] <= '9' or 'a' <= source[i] <= 'z' or 'A' <= source[i] <= 'Z' or source[i] == '_'):
                        i += 1
                elif is_oct:
                    while i < len(source) and ('0' <= source[i] <= '7' or source[i] == '_'):
                        i += 1
                elif is_bin:
                    while i < len(source) and source[i] in '01_':
                        i += 1
                else:
                    while i < len(source) and ('0' <= source[i] <= '9' or source[i] in '_.eE'):
                        if source[i] in 'eE':
                            if source[i+1:i+2] in '-+':
                                i += 1
                        i += 1
                    if source[i:i+1] in ('j', 'J'):
                        i += 1
                if '_' in source[start:i] and not '.' in source[start:i]: # float numbers are not checked for a while
                    number = source[start:i].replace('_', '')
                    number_with_separators = ''
                    j = len(number)
                    while j > 3:
                        number_with_separators = '_' + number[j-3:j] + number_with_separators
                        j -= 3
                    number_with_separators = number[0:j] + number_with_separators
                    if source[start:i] != number_with_separators:
                        raise Error('digit separator in this number is located in the wrong place (should be: '+ number_with_separators +')', start)
                category = Token.Category.NUMERIC_LITERAL
            elif ch == '\\':
                if source[i] not in "\r\n":
                    raise Error('only new line character allowed after backslash', i)
                if source[i] == "\r":
                    i += 1
                if source[i] == "\n":
                    i += 1
                continue
            else:
                raise Error('unexpected character ' + ch, lexem_start)

            tokens.append(Token(lexem_start, i, category))

    if len(nesting_elements):
        raise Error('there is no corresponding closing parenthesis/bracket/brace for `' + nesting_elements[-1][0] + '`', nesting_elements[-1][1])

    if expected_an_indented_block:
        raise Error('expected an indented block', i)

    while len(indentation_levels): # [4:] [-1]:‘At the end of the file, a DEDENT token is generated for each number remaining on the stack that is larger than zero.’
        tokens.append(Token(i, i, Token.Category.DEDENT))
        indentation_levels.pop()

    return tokens
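
if __name__ == '__main__':
    # Smoke test (hypothetical input; not part of the transpiler itself).
    _src = "def f(x):\n    return x + 1\n"
    for _token in tokenize(_src):
        print(_token.to_str(_src))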
PypiClean
/CalimaPython-2.2.0-py3-none-any.whl/pycalima/cmdline.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from pycalima.Calima import Calima, FindCalimas
import sys, getopt, time


def printHelp():
    print("\nCommand control and monitoring tool for PAX Calima fan. Let the air flow!\n")
    print("-h\tshow this help list")
    print("-l\tScan and list all found Calimas")
    print("-m MAC\tSpecify Calima MAC address")
    print("-p PIN\tSpecify Calima Pincode")
    print("-s\tScan all characteristics")
    print("-b SEC\tEnable Boost mode for SEC seconds")
    print("-t SPD\tSet trickle speed to SPD in all modes. Use max 2000.")
    print("\tWarning: this will make the fan run at this speed all the time")


def main():
    # Define vars
    action = ""
    mac_address = ""
    pincode = ""
    boostsecs = 0
    tricklespeed = 950

    try:
        opts, args = getopt.getopt(sys.argv[1:], "hb:lsm:p:t:")
    except getopt.GetoptError:
        printHelp()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            printHelp()
            sys.exit()
        elif opt == '-l':
            print(FindCalimas())
            sys.exit()
        elif opt in ("-m"):
            mac_address = arg
        elif opt in ("-p"):
            pincode = arg
        elif opt in ("-s"):
            action = "scan"
        elif opt == '-b':
            action = "startboost"
            boostsecs = int(arg)
        elif opt == '-t':
            action = "settrickle"
            tricklespeed = int(arg)
        else:
            printHelp()  # fixed: the original called an undefined usage()
            sys.exit(2)

    if pincode == "" or mac_address == "":
        print("You need to set both mac address and pincode to connect\n")
        print("Run me with -h to get some help\n")
        sys.exit(2)

    fan = Calima(mac_address, pincode)

    if action == "startboost":
        print("Setting Boost mode for {} seconds".format(boostsecs))
        fan.setBoostMode(1, 2250, boostsecs)
        time.sleep(2)
        print(fan.getBoostMode())
        fan.disconnect()
    elif action == "scan":
        fan.scanCharacteristics()
        #currentState = fan.getStateShort()
        #for item in currentState._fields:
        #    print("{}={} ".format(item, getattr(currentState, item)), end='')
        fan.disconnect()
    elif action == "settrickle":
        print("Setting tricklespeed to {} ".format(tricklespeed))
        fan.setFanSpeedSettings(2250, 975, tricklespeed)
        time.sleep(2)
        print(fan.getFanSpeedSettings())
        fan.disconnect()
    else:
        print("Device Name: ", fan.getDeviceName())
        print("Model Number: ", fan.getModelNumber())
        print("Serial Number: ", fan.getSerialNumber())
        print("Hardware Revision: ", fan.getHardwareRevision())
        print("Firmware Revision: ", fan.getFirmwareRevision())
        print("Software Revision: ", fan.getSoftwareRevision())
        print("Manufacturer: ", fan.getManufacturer())
        print("Alias: ", fan.getAlias())
        print("Factory Settings Changed: ", fan.getFactorySettingsChanged())
        print("Mode: ", fan.getMode())
        print("Fan Speed Settings: ", fan.getFanSpeedSettings())
        print("Sensors Sensitivity: ", fan.getSensorsSensitivity())
        print("Light Sensor Settings: ", fan.getLightSensorSettings())
        print("Heat Distributor: ", fan.getHeatDistributor())
        print("Boost Mode: ", fan.getBoostMode())
        print("Led: ", fan.getLed())
        print("Automatic Cycles: ", fan.getAutomaticCycles())
        print("Time: ", fan.getTime())
        print("Silent Hours: ", fan.getSilentHours())
        print("Trickle Days: ", fan.getTrickleDays())

        while True:
            try:
                print(fan.getState())
                time.sleep(2)
            except Exception as e:
                print(e)
                fan.disconnect()
                break


if __name__ == '__main__':
    main()
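
# Programmatic sketch of the same API the CLI wraps (kept as a comment so
# nothing runs on import; the MAC address and pincode are placeholders):
#
#     fan = Calima("12:34:56:78:9A:BC", "12345678")
#     print(fan.getFanSpeedSettings())
#     fan.disconnect()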
PypiClean
/BuilT-0.0.4-py3-none-any.whl/built/registry.py
from __future__ import print_function

import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from easydict import EasyDict as edict
from collections import defaultdict
from torchvision import datasets, transforms

from built.singleton_decorator import SingletonDecorator
from built.forward_hook import DefaultPostForwardHook
from built.metric import DefaultMetric
from built.logger import DefaultLogger
from built.models.mnist import Mnist


class Category:
    """Category class to save a category name and the names of its classes"""

    def __init__(self, name):
        self._name = name
        self._class_dict = dict()

    def __repr__(self):
        format_str = self.__class__.__name__
        format_str += f'(name={self._name}, items={list(self._class_dict.keys())})'
        return format_str

    @property
    def name(self):
        return self._name

    @property
    def class_dict(self):
        return self._class_dict

    def get(self, key: str):
        return self._class_dict.get(key, None)

    def add(self, klass):
        """Add a callable class.

        Args:
            klass: callable class to be registered
        """
        if not callable(klass):
            raise ValueError(f'object must be callable')

        class_name = klass.__name__
        if class_name in self._class_dict:
            print(f'{class_name} is already registered in {self.name}')
        else:
            self._class_dict[class_name] = klass
        return klass


class Registry:
    """Registry class to register classes with categories"""

    categories = dict()

    # def __repr__(self):
    #     format_str = self.__class__.__name__
    #     for k, v in self.categories.items():
    #         format_str += f'(category[{k}]: {v})'
    #     return format_str

    @classmethod
    def clear(cls):
        cls.categories.clear()

    @classmethod
    def add(cls, category, klass=None):
        if category not in cls.categories:
            cls.categories[category] = Category(category)
        if klass is not None:
            # Return the class so that register() works as a decorator
            # (otherwise the decorated class would be replaced by None).
            klass = cls.categories[category].add(klass)
        return klass

    @classmethod
    def register(cls, category=''):
        def _register(klass):
            return cls.add(category, klass)
        return _register

    @classmethod
    def build_from_config(cls, category, config, default_args=None):
        """Build a callable object from a configuration dict.

        Args:
            category: The name of the category to search the name in.
            config (dict): Configuration dict. It should contain the key "name".
            default_args (dict, optional): Default initialization arguments.
        """
        assert isinstance(config, dict) and 'name' in config
        assert isinstance(default_args, dict) or default_args is None

        name = config['name']
        name = name.replace('-', '_')
        cls.add(category)
        klass = cls.categories[category].get(name)
        if klass is None:
            raise KeyError(f'{name} is not in the {category} registry')

        args = dict()
        if default_args is not None:
            args.update(default_args)
        if 'params' in config:
            args.update(config['params'])

        return klass(**args)
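
if __name__ == '__main__':
    # Hypothetical usage sketch; the model class and its parameters are
    # illustrative only.
    @Registry.register('model')
    class TinyNet:
        def __init__(self, hidden=8):
            self.hidden = hidden

    net = Registry.build_from_config('model',
                                     {'name': 'TinyNet',
                                      'params': {'hidden': 16}})
    print(net.hidden)  # 16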
PypiClean
/HIRISEimgs-1.0.0.tar.gz/HIRISEimgs-1.0.0/HIRISE_api/hirise/utils.py
import requests
from bs4 import BeautifulSoup


def file_parameters_list():
    """Function that returns the file parameters list"""
    return [
        "FILE_NAME",
        "REQUIRED_STORAGE_BYTES",
        "MISSION_PHASE_NAME",
        "ORBIT_NUMBER",
    ]


def image_map_parameters_list():
    """Function that returns the image mapping parameters list"""
    return [
        "CENTER_LATITUDE",
        "CENTER_LONGITUDE",
        "MAXIMUM_LATITUDE",
        "MINIMUM_LATITUDE",
        "EASTERNMOST_LONGITUDE",
        "WESTERNMOST_LONGITUDE",
    ]


def viewing_parameters_list():
    """Function that returns the viewing parameters list"""
    return [
        "INCIDENCE_ANGLE",
        "EMISSION_ANGLE",
        "PHASE_ANGLE",
        "LOCAL_TIME",
        "SOLAR_LONGITUDE",
        "SUB_SOLAR_AZIMUTH",
        "NORTH_AZIMUTH",
    ]


def timing_parameters_list():
    """Function that returns the timing parameters list"""
    return [
        "MRO:OBSERVATION_START_TIME",
        "START_TIME",
        "SPACECRAFT_CLOCK_START_COUNT",
        "STOP_TIME",
        "SPACECRAFT_CLOCK_STOP_COUNT",
        "PRODUCT_CREATION_TIME",
    ]


def other_parameters_list():
    """Function that returns the scaling factor, offset and center filter
    wavelength parameters list"""
    return ["SCALING_FACTOR", "OFFSET", "CENTER_FILTER_WAVELENGTH"]


def LBL_parser(label_url):
    """Function that parses a .LBL file in NASA's Planetary Data System"""
    # Get the data from the url
    req = requests.get(label_url, stream=True)
    lbl_file = req.text
    lbl_dict = {
        "file_parameters": {},
        "image_map_parameters": {},
        "viewing_parameters": {},
        "timing_parameters": {},
        "other_parameters": {},
    }

    # Select the parameters to save
    file_parameters = file_parameters_list()
    image_map_parameters = image_map_parameters_list()
    viewing_parameters = viewing_parameters_list()
    timing_parameters = timing_parameters_list()
    other_parameters = other_parameters_list()

    file_dict = dict.fromkeys(file_parameters, "")
    image_dict = dict.fromkeys(image_map_parameters, "")
    view_dict = dict.fromkeys(viewing_parameters, "")
    time_dict = dict.fromkeys(timing_parameters, "")
    other_dict = dict.fromkeys(other_parameters, "")

    for line in lbl_file.split("\n"):
        # Split the label data
        if "=" in line:
            line_array = line.split("=")
            line_key = line_array[0].strip()
            line_value = line_array[1].strip()

            # Append required values in the respective df formats
            if line_key in file_parameters:
                file_dict[line_key] = line_value
            elif line_key in viewing_parameters:
                view_dict[line_key] = line_value
            elif line_key in timing_parameters:
                time_dict[line_key] = line_value
            elif line_key in other_parameters:
                other_dict[line_key] = line_value
            elif line_key in image_map_parameters:
                image_dict[line_key] = line_value

    lbl_dict["file_parameters"] = file_dict
    lbl_dict["image_map_parameters"] = image_dict
    lbl_dict["viewing_parameters"] = view_dict
    lbl_dict["timing_parameters"] = time_dict
    lbl_dict["other_parameters"] = other_dict

    return lbl_dict


def get_website_data(base_url, page_key, sub_key=None):
    """Function that assists in scraping the NASA website"""
    if sub_key:
        page_url = base_url + str(page_key) + "/" + sub_key
    else:
        page_url = base_url + str(page_key)

    # Get the data from the url
    req = requests.get(page_url)

    # Use soup to create an HTML parser
    soup = BeautifulSoup(req.text, "html.parser")

    # Find all the labels with the correct tag - "a"
    labels = soup.find_all("a")

    # List to save orbital labels
    page_labels = []

    # Loop through all the orbital ranges
    for i in range(1, len(labels)):
        # Save orbital labels into a list
        page_labels.append(labels[i]["href"])

    return page_labels, len(page_labels)


def validate_append_float_data(param, list_of_params):
    """Function that validates floating point data"""
    if param:
        list_of_params.append(float(param.split("<")[0]))
    else:
        list_of_params.append(None)


def append_float_data_without_strip(param, list_of_params):
    """Function that validates floating point data without stripping the
    last characters"""
    if param:
        list_of_params.append(float(param))
    else:
        list_of_params.append(None)


def downloadRange(start_range, end, step):
    i = start_range
    while i < end:
        yield i
        i += step
    yield end
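
# Usage sketch (kept as a comment; the label URL below is a placeholder,
# not a verified PDS path):
#
#     lbl = LBL_parser('https://hirise-pds.lpl.arizona.edu/PDS/EXAMPLE.LBL')
#     print(lbl['viewing_parameters']['INCIDENCE_ANGLE'])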
PypiClean
/CocoRPy27-1.4.1.zip/CocoRPy27-1.4.1/pimaker/piBuilder.py
from fileTools import *


class Rule( object ):

    def __init__( self, target, dependancies, action, description=None ):
        global TARGET, NEEDS

        self._target = target
        self._description = description
        self._dependancies = { }     # dependency to rule
        self._action = None

        TARGET = self._target
        NEEDS = [ ]
        for dependancy in dependancies:
            NEEDS.append( self.expandMacros(dependancy) )

        for dependancy in NEEDS:
            self._dependancies[ dependancy ] = None

        self._action = self.expandMacros( action )

    def getDependancies( self ):
        return self._dependancies.keys( )

    def getDescription( self ):
        return self._description

    def defineDependencyRule( self, dependencyName, Rule ):
        self._dependancies[ dependencyName ] = Rule

    def build( self ):
        target = os.path.normpath( self._target )

        # Handle subrules
        if len( self._dependancies ) == 0:
            needToRebuildTarget = True
        else:
            needToRebuildTarget = not os.path.exists( target )

            for dependency, subrule in self._dependancies.iteritems( ):
                dependency = os.path.normpath( dependency )

                if not os.path.exists( dependency ):
                    if subrule:
                        subrule.build( )
                        print '-----------------------------------'
                    else:
                        raise Exception( 'No rule to build target %s' % dependency )

                if os.path.exists( target ):
                    targetTime = os.stat( target ).st_mtime
                    dependencyTime = os.stat( dependency ).st_mtime

                    if dependencyTime > targetTime:
                        needToRebuildTarget = True

        if needToRebuildTarget:
            print 'Building: %s' % target
            execActionCode( self._action )

    def expandMacros( self, aString ):
        macroStart = aString.find( '${' )
        while macroStart != -1:
            macroEnd = aString.find( '}', macroStart+2 )
            macroSeq = aString[ macroStart+2 : macroEnd ].split( ',' )

            if '.' in macroSeq[0]:
                var, op = macroSeq[0].split('.')
            else:
                var = macroSeq[0]
                op = None

            value = globals( )[ var ]
            if isinstance( value, (str, unicode) ):
                if op == 'dir':
                    value = os.path.dirname( value )
                elif op == 'name':
                    value = os.path.basename( value )
                elif op == 'base':
                    value = os.path.splitext( os.path.basename( value ) )[0]
                elif op == 'ext':
                    value = os.path.splitext( os.path.basename( value ) )[1]

                if len(macroSeq) > 1:
                    mark = macroSeq[1].find( '=' )
                    if mark == -1:
                        raise Exception( 'Error in macro' )
                    old = macroSeq[1][ : mark ]
                    new = macroSeq[1][ mark+1 : ]
                    value = value.replace( old, new )

                aString = aString[ :macroStart ] + value + aString[ macroEnd+1: ]
                macroStart = aString.find( '${' )
            elif isinstance( value, (list, tuple) ):
                pass

        return aString


class Builder( object ):

    def __init__( self, projectName='' ):
        self._rules = { }
        self._topLevelTargets = [ ]
        self._allTargets = [ ]
        self._leaves = [ ]
        self._projectName = projectName
        self._menuOrder = [ ]

    def addTarget( self, targets, *args ):
        if isinstance( targets, str ):
            targets = [ targets ]

        numArgs = len( args )
        if numArgs == 3:
            # We have a description
            description = args[0]
            dependencies = args[1]
            actions = args[2]
        elif numArgs == 2:
            # No description
            description = None
            dependencies = args[0]
            actions = args[1]
        else:
            raise TypeError( 'addTarget() takes 3 or 4 arguments (%d given)' % numArgs )

        for target in targets:
            depend = [ ]
            self._rules[ target ] = Rule( target, dependencies, actions, description )
            self._menuOrder.append( target )

    def finalize( self ):
        # Assemble the Rules into a Tree
        for target, rule in self._rules.iteritems( ):
            for dependencyName in rule.getDependancies( ):
                if dependencyName in self._rules:
                    rule.defineDependencyRule( dependencyName, self._rules[ dependencyName ] )
                else:
                    if dependencyName not in self._leaves:
                        self._leaves.append( dependencyName )

        # Determine the top-level targets
        targets = self._rules.keys( )
        for name in self._rules.keys( ):
            for rule in self._rules.values( ):
                if name in rule.getDependancies( ):
                    if name in targets:
                        targets.remove( name )
        targets.sort()

        self._topLevelTargets = [ ]
        for targetName in self._menuOrder:
            if targetName in targets:
                self._topLevelTargets.append( ( targetName, self._rules[ targetName ].getDescription() ) )

        # Create the allTargets list
        self._allTargets = [ ]
        for targetName in self._menuOrder:
            descr = self._rules[ targetName ].getDescription()
            if descr is not None:
                self._allTargets.append( ( targetName, descr ) )
            else:
                self._allTargets.append( ( targetName, '' ) )

    def targets( self ):
        return self._rules.keys( )

    def build( self, target ):
        print '********** Building Target: %s...' % target
        try:
            theRule = self._rules[ target ]
            theRule.build( )
            print '********** Build completed.'
        except:
            type, value, trace = sys.exc_info( )
            print 'BUILD FAILED!!!'
            print traceback.print_exc( )

    def topTargets( self ):
        return self._topLevelTargets

    def allTargets( self ):
        return self._rules.keys( )

    def initializationCode( self, code ):
        execActionCode( code )

    def goInteractive( self ):
        print
        print 'Entering Interactive Builder'
        response = ''
        self._buildMenu( self._topLevelTargets )
        while response != 'exit':
            target = raw_input( '\nbuild> ' )
            if target == 'showall':
                self._buildMenu( self._allTargets )
                continue
            elif target == 'exit':
                break
            elif target in self.allTargets( ):
                print
                print
                self.build( target )
                self._buildMenu( self._topLevelTargets )
            else:
                print '   !!! Unknown Target !!!'

    def _buildMenu( self, targetList ):
        print
        print 'Targets:'
        for targetName, targetDescription in targetList:
            if targetDescription:
                print '   %-20s %s' % ( targetName, targetDescription )
            else:
                print '   %-20s' % ( targetName )
        print
        print '   %-20s %s' % ( 'showall', 'Show the full list of targets.' )
        print '   %-20s %s' % ( 'exit', 'Exit the interactive builder.' )


bld = Builder( )

def buildTarget( aTargetName ):
    bld.build( aTargetName )

def execActionCode( code ):
    exec code in globals( )
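
# Hypothetical build-script sketch (kept as a comment; target name,
# dependency and action code are illustrative -- the action string is
# executed by execActionCode after ${...} macro expansion):
#
#    bld.addTarget( 'docs', 'Rebuild the documentation',
#                   [ 'README.txt' ],
#                   "print 'building ${TARGET}'" )
#    bld.finalize( )
#    bld.goInteractive( )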
PypiClean
/Eve-SQLAlchemy-0.7.1.tar.gz/Eve-SQLAlchemy-0.7.1/eve_sqlalchemy/config/domainconfig.py
from __future__ import unicode_literals

from eve.utils import config


class DomainConfig(object):
    """Create an Eve `DOMAIN` dict out of :class:`ResourceConfig`s.

    Upon rendering the `DOMAIN` dictionary, we will first collect all given
    :class:`ResourceConfig` objects and pass them as
    `related_resource_configs` to their `render` methods. This way each
    :class:`ResourceConfig` knows about all existent endpoints in `DOMAIN`
    and can properly set up relations.

    A special case occurs if one model is referenced for more than one
    endpoint, e.g.:

        DomainConfig({
            'users': ResourceConfig(User),
            'admins': ResourceConfig(User),
            'groups': ResourceConfig(Group)
        })

    Here, we cannot reliably determine which resource should be used for
    relations to `User`. In this case you have to specify the target
    resource for all such relations:

        DomainConfig({
            'users': ResourceConfig(User),
            'admins': ResourceConfig(User),
            'groups': ResourceConfig(Group)
        }, related_resources={
            (Group, 'members'): 'users',
            (Group, 'admins'): 'admins'
        })
    """

    def __init__(self, resource_configs, related_resources={}):
        """Initializes the :class:`DomainConfig` object.

        :param resource_configs: mapping of endpoint names to
            :class:`ResourceConfig` objects
        :param related_resources: mapping of (model, field name) to a resource
        """
        self.resource_configs = resource_configs
        self.related_resources = related_resources

    def render(self, date_created=config.DATE_CREATED,
               last_updated=config.LAST_UPDATED, etag=config.ETAG):
        """Renders the Eve `DOMAIN` dictionary.

        If you change any of `DATE_CREATED`, `LAST_UPDATED` or `ETAG`, make
        sure you pass your new value.

        :param date_created: value of `DATE_CREATED`
        :param last_updated: value of `LAST_UPDATED`
        :param etag: value of `ETAG`
        """
        domain_def = {}
        related_resource_configs = self._create_related_resource_configs()
        for endpoint, resource_config in self.resource_configs.items():
            domain_def[endpoint] = resource_config.render(
                date_created, last_updated, etag, related_resource_configs)
        return domain_def

    def _create_related_resource_configs(self):
        """Creates a mapping from model to (resource, :class:`ResourceConfig`).

        This mapping will be passed to all :class:`ResourceConfig` objects'
        `render` methods. If there is more than one resource using the same
        model, relations for this model cannot be set up automatically. In
        this case you will have to manually set `related_resources` when
        creating the :class:`DomainConfig` object.
        """
        result = {}
        keys_to_remove = set()
        for resource, resource_config in self.resource_configs.items():
            model = resource_config.model
            if model in result:
                keys_to_remove.add(model)
            result[model] = (resource, resource_config)
        for key in keys_to_remove:
            del result[key]
        for field, resource in self.related_resources.items():
            result[field] = (resource, self.resource_configs[resource])
        return result
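
# Typical settings sketch (kept as a comment; `User` and `Group` stand in
# for your SQLAlchemy models and are not defined here):
#
#     from eve_sqlalchemy.config import DomainConfig, ResourceConfig
#
#     DOMAIN = DomainConfig({
#         'users': ResourceConfig(User),
#         'groups': ResourceConfig(Group),
#     }).render()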
PypiClean
/FaceChannelV1/Model.py
import numpy
from keras.models import load_model, Model, Input
from keras.layers import Dense, Dropout, Flatten, Lambda
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization

import os
import keras

"""Load Trained Models"""

"""Models from Scratch"""


def getDimensionalModel():
    backbone = buildFaceChannel()
    dense = Dense(200, activation="relu", name="denseLayer")(backbone.output)
    arousal_output = Dense(units=1, activation='tanh', name='arousal_output')(dense)
    valence_output = Dense(units=1, activation='tanh', name='valence_output')(dense)
    return Model(inputs=backbone.input, outputs=[arousal_output, valence_output])


def getCategoricalModel(numberClasses):
    backbone = buildFaceChannel()
    dense = Dense(200, activation="relu", name="denseLayer")(backbone.output)
    categoricalOutput = Dense(units=numberClasses, activation='tanh', name='categoricalOutput')(dense)
    return Model(inputs=backbone.input, outputs=categoricalOutput)


def buildFaceChannel():
    def shuntingInhibition(inputs):
        inhibitionDecay = 0.5
        v_c, v_c_inhibit = inputs
        output = (v_c / (inhibitionDecay + v_c_inhibit))
        return output

    keras.backend.set_image_data_format("channels_first")

    nch = 256
    inputShape = numpy.array((1, 64, 64)).astype(numpy.int32)
    inputLayer = Input(shape=inputShape, name="Vision_Network_Input")

    # Conv1 and 2 (integer division so the filter counts stay ints on Python 3)
    conv1 = Conv2D(nch // 4, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv1")(inputLayer)
    bn1 = BatchNormalization(axis=1)(conv1)
    actv1 = Activation("relu")(bn1)

    conv2 = Conv2D(nch // 4, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv2")(actv1)
    bn2 = BatchNormalization(axis=1)(conv2)
    actv2 = Activation("relu")(bn2)

    mp1 = MaxPooling2D(pool_size=(2, 2))(actv2)
    drop1 = Dropout(0.25)(mp1)

    # Conv 3 and 4
    conv3 = Conv2D(nch // 2, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv3")(drop1)
    bn3 = BatchNormalization(axis=1)(conv3)
    actv3 = Activation("relu")(bn3)

    conv4 = Conv2D(nch // 2, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv4")(actv3)
    bn4 = BatchNormalization(axis=1)(conv4)
    actv4 = Activation("relu")(bn4)

    mp2 = MaxPooling2D(pool_size=(2, 2))(actv4)
    drop2 = Dropout(0.25)(mp2)

    # Conv 5 and 6 and 7
    conv5 = Conv2D(nch // 2, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv5")(drop2)
    bn5 = BatchNormalization(axis=1)(conv5)
    actv5 = Activation("relu")(bn5)

    conv6 = Conv2D(nch // 2, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv6")(actv5)
    bn6 = BatchNormalization(axis=1)(conv6)
    actv6 = Activation("relu")(bn6)

    conv7 = Conv2D(nch // 2, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv7")(actv6)
    bn7 = BatchNormalization(axis=1)(conv7)
    actv7 = Activation("relu")(bn7)

    mp3 = MaxPooling2D(pool_size=(2, 2))(actv7)
    drop3 = Dropout(0.25)(mp3)

    # Conv 8 and 9 and 10
    conv8 = Conv2D(nch, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="Vision_conv8")(drop3)
    bn8 = BatchNormalization(axis=1)(conv8)
    actv8 = Activation("relu")(bn8)

    conv9 = Conv2D(nch, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                   name="conv9")(actv8)
    bn9 = BatchNormalization(axis=1)(conv9)
    actv9 = Activation("relu")(bn9)

    conv10 = Conv2D(nch, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                    activation="relu", name="conv10")(actv9)

    conv10_inhibition = Conv2D(nch, (3, 3), padding="same", kernel_initializer="glorot_uniform",
                               activation="relu", name="conv10_inhibition")(actv9)

    v_conv_inhibitted = Lambda(function=shuntingInhibition)([conv10, conv10_inhibition])

    mp4 = MaxPooling2D(pool_size=(2, 2))(v_conv_inhibitted)
    drop4 = Dropout(0.25)(mp4)

    flatten = Flatten()(drop4)

    # Wrap the backbone as a Model so callers can use its .input and .output
    # (returning the bare tensor would break Model(inputs=backbone.input, ...)).
    return Model(inputs=inputLayer, outputs=flatten)
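
if __name__ == '__main__':
    # Hypothetical instantiation sketch; inputs are 1x64x64 channels-first
    # face crops, as assumed by buildFaceChannel above.
    model = getDimensionalModel()
    model.compile(optimizer='adam',
                  loss={'arousal_output': 'mse', 'valence_output': 'mse'})
    model.summary()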
PypiClean
/EpyNN-1.2.11.tar.gz/EpyNN-1.2.11/epynn/convolution/parameters.py
import math

# Related third party imports
import numpy as np


def convolution_compute_shapes(layer, A):
    """Compute forward shapes and dimensions for layer.
    """
    X = A    # Input of current layer

    layer.fs['X'] = X.shape    # (m, h, w, d)

    layer.d['m'] = layer.fs['X'][0]    # Number of samples (m)
    layer.d['h'] = layer.fs['X'][1]    # Height of features (h)
    layer.d['w'] = layer.fs['X'][2]    # Width of features (w)
    layer.d['d'] = layer.fs['X'][3]    # Depth of features (d)

    # Output height (oh) and width (ow)
    layer.d['oh'] = math.floor((layer.d['h']-layer.d['fh']) / layer.d['sh']) + 1
    layer.d['ow'] = math.floor((layer.d['w']-layer.d['fw']) / layer.d['sw']) + 1

    # Shapes for trainable parameters
    # filter_height (fh), filter_width (fw), features_depth (d), unit_filters (u)
    layer.fs['W'] = (layer.d['fh'], layer.d['fw'], layer.d['d'], layer.d['u'])
    layer.fs['b'] = (layer.d['u'], )

    return None


def convolution_initialize_parameters(layer):
    """Initialize parameters for layer.
    """
    # For linear activation of inputs (Z)
    layer.p['W'] = layer.initialization(layer.fs['W'], rng=layer.np_rng)
    layer.p['b'] = np.zeros(layer.fs['b'])    # Z = X * W + b

    return None


def convolution_compute_gradients(layer):
    """Compute gradients with respect to weight and bias for layer.
    """
    # Gradients initialization with respect to parameters
    for parameter in layer.p.keys():
        gradient = 'd' + parameter
        layer.g[gradient] = np.zeros_like(layer.p[parameter])

    Xb = layer.fc['Xb']    # Input blocks of forward propagation
    dZ = layer.bc['dZ']    # Gradient of the loss with respect to Z

    # Expand dZ dimensions with respect to Xb
    dZb = dZ
    dZb = np.expand_dims(dZb, axis=3)    # (m, oh, ow, 1, u)
    dZb = np.expand_dims(dZb, axis=3)    # (m, oh, ow, 1, 1, u)
    dZb = np.expand_dims(dZb, axis=3)    # (m, oh, ow, 1, 1, 1, u)

    # (1) Gradient of the loss with respect to W, b
    dW = layer.g['dW'] = np.sum(dZb * Xb, axis=(2, 1, 0))    # (1.1) dL/dW
    db = layer.g['db'] = np.sum(dZb, axis=(2, 1, 0))         # (1.2) dL/db

    layer.g['db'] = db.squeeze() if layer.use_bias else 0.

    return None


def convolution_update_parameters(layer):
    """Update parameters for layer.
    """
    for gradient in layer.g.keys():
        parameter = gradient[1:]
        # Update is driven by learning rate and gradients
        layer.p[parameter] -= layer.lrate[layer.e] * layer.g[gradient]

    return None
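
# Worked example of the output-size formula above (illustrative numbers):
# with h = 28, fh = 3 and sh = 1 the output height is
#
#     oh = floor((28 - 3) / 1) + 1 = 26
#
# and the output width follows the same formula with w, fw and sw.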
PypiClean
/mynewspaper-4.0.tar.gz/mynewspaper-4.0/tools/migrate_db_from_v3_to_v4.py
# CAUTION: this script migrates a MyNewspaper database from
#          v3.x to v4.x format

# Copyright (C) 2013  Iñigo Serna
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.


__author__ = 'Iñigo Serna'
__revision__ = '1.0'


import os
import os.path
import sys
import sqlite3
import datetime
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))
from utils import get_feed_icon


######################################################################
##### Variables
STMT_CREATE_DB = """
CREATE TABLE groups (
    gid INTEGER PRIMARY KEY,
    name VARCHAR(100) UNIQUE NOT NULL,
    sort_idx INT,
    num_total INT,
    num_unread INT,
    num_starred INT,
    num_later INT,
    num_archived INT
);

CREATE TABLE feeds (
    fid INTEGER PRIMARY KEY,
    name VARCHAR(100) UNIQUE NOT NULL,
    url VARCHAR(255) NOT NULL,
    link VARCHAR(255),
    icon VARCHAR(255),
    state INT,                 -- 0:archive, 1:enabled, 2:disabled, 3:broken
    sort_idx INT,
    gid INT,
    etag VARCHAR(255),
    lastupdate VARCHAR(100),   -- feed sent timestamp, as string
    artsperhour INT,
    num_total INT,
    num_unread INT,
    num_starred INT,
    num_later INT,
    num_archived INT
);
CREATE INDEX idx_feeds_group ON feeds (gid);

CREATE TABLE articles (
    aid INTEGER PRIMARY KEY,
    uid VARCHAR(255) UNIQUE NOT NULL,
    title VARCHAR(255),
    url VARCHAR(255),
    contents TEXT,
    timestamp TIMESTAMP,
    state INT,                 -- 0:total/delete, 1:unread, 2:starred, 3:later, 4:archived
    fid INT
);
CREATE INDEX idx_articles_uid ON articles (uid);
CREATE INDEX idx_articles_state ON articles (state);
CREATE INDEX idx_articles_feed ON articles (fid);

INSERT INTO groups VALUES (0, 'Uncategorized', 0, 0, 0, 0, 0, 0);
INSERT INTO feeds VALUES (0, 'Archive', '', '', '', 0, 0, -1, '', '', 0, 0, 0, 0, 0, 0);
"""


######################################################################
##### Functions
class Feed:
    def __init__(self, fid, name, link):
        self.fid = fid
        self.name = name
        self.link = link

    def __repr__(self):
        return '<Feed {0.fid}: {0.name}>'.format(self)


def migrate_db(db_old, db_new, add_icons=True):
    db0 = sqlite3.connect(db_old)
    db0.row_factory = sqlite3.Row
    c0 = db0.cursor()
    print('Creating new DB')
    db1 = sqlite3.connect(db_new)
    db1.row_factory = sqlite3.Row
    c1 = db1.cursor()
    c1.executescript(STMT_CREATE_DB)

    print('Migrating Folders')
    c0.execute('SELECT * FROM rss_group')
    for r in c0.fetchall():
        c1.execute('INSERT INTO groups VALUES (?, ?, ?, 0, 0, 0, 0, 0)',
                   (r['id'], r['name'], r['sort_idx']))

    print('Migrating Feeds')
    c0.execute('SELECT * FROM feed')
    for r in c0.fetchall():
        lastupdate = ''  # datetime.datetime.strptime(r['lastupdate'], '%Y-%m-%d %H:%M:%S')
        state = 2 if r['disabled'] else 1
        c1.execute('INSERT INTO feeds VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, -1, 0, 0, 0, 0, 0)',
                   (r['id'], r['name'], r['url'], r['link'], '', state,
                    r['sort_idx'], r['rss_group_id'], r['etag'], lastupdate))

    print('Migrating Articles')
    c0.execute('SELECT * FROM article')
    for r in c0.fetchall():
        timestamp = r['timestamp'] + '+00:00'
        if r['state'] == 2:
            state = 4    # archived
        elif r['state'] == 3:
            state = 2    # starred
        elif r['state'] == 4:
            state = 3    # later
        else:
            state = r['state']
        c1.execute('INSERT INTO articles VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
                   (r['id'], r['aid'], r['title'], r['url'], r['contents'],
                    timestamp, state, r['feed_id']))

    print('Fixing numbers')
    ST = ('num_total', 'num_unread', 'num_starred', 'num_later', 'num_archived')
    c1.execute('SELECT fid FROM feeds')
    for r in c1.fetchall():
        for st, it in enumerate(ST):
            if st == 0:
                continue    # num_total
            stmt = 'UPDATE feeds SET %s=(SELECT COUNT(*) FROM articles WHERE fid=? AND state=%d) WHERE fid=?' % (it, st)
            c1.execute(stmt, (r[0], r[0]))
        c1.execute('UPDATE feeds SET num_total=num_unread+num_starred+num_later+num_archived WHERE fid=?', (r[0], ))
    c1.execute('SELECT gid FROM groups')
    for r in c1.fetchall():
        for it in ST:
            stmt = 'UPDATE groups SET %s=(SELECT COALESCE(SUM(%s), 0) FROM feeds WHERE gid=?) WHERE gid=?' % (it, it)
            c1.execute(stmt, (r[0], r[0]))

    if add_icons:
        print('Adding feed icons... be patient, this will take some time')
        c1.execute('SELECT fid, name, link FROM feeds')
        feeds = [Feed(r['fid'], r['name'], r['link']) for r in c1.fetchall()]
        with ThreadPoolExecutor(max_workers=10) as e:
            futures_favicon = {e.submit(get_feed_icon, f): f for f in feeds}
            for future in as_completed(futures_favicon):
                try:
                    feed = futures_favicon[future]
                    icon_url = future.result()
                except Exception as exc:
                    print('Error adding favicon for %s: %s' % (feed, exc))
                else:
                    if icon_url is not None:
                        c1.execute('UPDATE feeds SET icon=? WHERE fid=?',
                                   (icon_url, feed.fid))
    else:
        print('Won\'t add feed icons')

    db1.commit()
    c0.close()
    db0.close()
    c1.close()
    db1.close()


def main(db_old, db_new, add_icons=True):
    t0 = time.time()
    print('MyNewspaper DB migration tool v3 -> v4')
    print('---------------------------------------')
    print('Old DB: %s' % db_old)
    print('New DB: %s' % db_new)
    print()
    if not os.path.exists(db_old):
        print('Old db file <%s> does not exist' % db_old)
        sys.exit(-1)
    if os.path.exists(db_new):
        os.unlink(db_new)
    migrate_db(db_old, db_new, add_icons)
    tot = time.time() - t0
    if tot > 60:
        print('Migration finished in %d\'%2.2d"' % divmod(tot, 60))
    else:
        print('Migration finished in %d sec' % tot)


######################################################################
##### Main
if __name__ == '__main__':
    if len(sys.argv) == 3 and sys.argv[1] != sys.argv[2]:
        main(sys.argv[1], sys.argv[2])
    elif len(sys.argv) == 4 and sys.argv[1] != sys.argv[2] and sys.argv[3] == '--no-favicons':
        main(sys.argv[1], sys.argv[2], False)
    else:
        print('%s <old.db> <new.db> [--no-favicons]' % sys.argv[0])
        sys.exit(-1)


######################################################################
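
# Example invocations (hypothetical file names):
#
#     python migrate_db_from_v3_to_v4.py mynewspaper-v3.db mynewspaper-v4.db
#     python migrate_db_from_v3_to_v4.py v3.db v4.db --no-favicons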
PypiClean
/MagnetiCalc-1.15.2.tar.gz/MagnetiCalc-1.15.2/magneticalc/QtWidgets2/QLayouted.py
# ISC License
#
# Copyright (c) 2020–2022, Paul Wilhelm, M. Sc. <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

from typing import Dict, Tuple, Callable, Union, Optional
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QLayout, QWidget, QPushButton
from magneticalc.QtWidgets2.QButtons import QButtons


class QLayouted:
    """ QLayouted class. """

    def __init__(self, direction: str = "vertical") -> None:
        """
        Initializes the QLayouted class.
        This adds a layout and several related functions like addWidget() to the parent class.

        @param direction: Sets "vertical" or "horizontal" layout
        """
        self._layout = QVBoxLayout() if direction == "vertical" else QHBoxLayout()

    def install_layout(self, parent: QWidget) -> None:
        """
        Installs this layout in the parent.
        """
        parent.setLayout(self._layout)

    # noinspection PyPep8Naming
    def addWidget(self, widget, alignment: Optional[Union[Qt.Alignment, Qt.AlignmentFlag]] = None) -> None:
        """
        Adds widget.

        @param widget: QWidget
        @param alignment: Alignment
        """
        if alignment:
            # Fixed: forward the alignment (both branches used to be identical).
            self._layout.addWidget(widget, alignment=alignment)
        else:
            self._layout.addWidget(widget)

    # noinspection PyPep8Naming
    def addLayout(self, layout: QLayout) -> None:
        """
        Adds layout.

        @param layout: QLayout
        """
        self._layout.addLayout(layout)

    # noinspection PyPep8Naming
    def addSpacing(self, spacing: int) -> None:
        """
        Adds spacing.

        @param spacing: Spacing value
        """
        self._layout.addSpacing(spacing)

    # noinspection PyPep8Naming
    def addButtons(self, data: Dict[str, Tuple[str, Callable]]) -> Dict[int, QPushButton]:
        """
        Adds buttons.

        @param data: Dictionary {text: (icon, callback), …}
        @return: Dictionary {index: QPushButton, …}
        """
        buttons = QButtons(data)
        self.addLayout(buttons)
        return buttons.dictionary
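
# Hypothetical usage sketch (kept as a comment): a QWidget subclass mixing
# in QLayouted; the button label and callback are illustrative.
#
#     class Panel(QWidget, QLayouted):
#         def __init__(self) -> None:
#             QWidget.__init__(self)
#             QLayouted.__init__(self, direction="vertical")
#             self.install_layout(self)
#             self.addButtons({"Close": ("", self.close)})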
PypiClean
/NeurIPS22-CellSeg-0.0.1.tar.gz/NeurIPS22-CellSeg-0.0.1/README.md
# NeurIPS-CellSeg
A naive baseline and submission demo for the [microscopy image segmentation challenge in NeurIPS 2022](https://neurips22-cellseg.grand-challenge.org/)

## Requirements

```python
MONAI version: 0.9
Numpy version: 1.21.2
Pytorch version: 1.10.1
Nibabel version: 3.2.1
scikit-image version: 0.19.2
Pillow version: 9.0.1
Tensorboard version: 2.8.0
gdown version: 4.2.0
TorchVision version: 0.11.2
tqdm version: 4.63.0
psutil version: 5.8.0
pandas version: 1.4.1
einops version: 0.3.2
```

## Preprocessing

Download the training data to the `data` folder.

Run `python pre_process_3class.py`

## Training

`cd baseline`

Run `python model_training_3class.py --data_path 'path to training data' --batch_size 8`

## Inference

Run `python predict.py -i input_path -o output_path`

> Your prediction file should have at least the two arguments: `input_path` and `output_path`. The two arguments are important to establish connections between local folders and docker folders.

## Build Docker

We recommend this great tutorial: https://nbviewer.org/github/ericspod/ContainersForCollaboration/blob/master/ContainersForCollaboration.ipynb

### 1) Preparation

The docker is built on [MONAI](https://hub.docker.com/r/projectmonai/monai)

> docker pull projectmonai/monai

Prepare `Dockerfile`

```dockerfile
FROM projectmonai/monai:latest
WORKDIR /workspace
COPY ./ /workspace
```

Put the inference command in `predict.sh`

```bash
# !/bin/bash -e
python predict.py -i "/workspace/inputs/" -o "/workspace/outputs/"
```

> The `input_path` and `output_path` arguments should specify the corresponding docker workspace folders rather than local folders, because we will map the local folders to the docker workspace folders when running the docker container.

### 2) Build Docker and run a sanity test

The submitted docker will be evaluated by the following command:

```bash
docker container run --gpus "device=0" --name teamname --rm -v $PWD/CellSeg_Test/:/workspace/inputs/ -v $PWD/teamname_outputs/:/workspace/outputs/ teamname:latest /bin/bash -c "sh predict.sh"
```

- `--name`: container name during running
- `--rm`: remove the container after running
- `-v $PWD/CellSeg_Test/:/workspace/inputs/`: map the local image data folder to the Docker `workspace/inputs` folder
- `-v $PWD/teamname_outputs/:/workspace/outputs/`: map the Docker `workspace/outputs` folder to the local folder. The segmentation results will be in `$PWD/teamname_outputs`
- `teamname:latest`: docker image name (should be `teamname`) and its version tag. **The version tag should be `latest`**. Please do not use `v0`, `v1`... as the version tag
- `/bin/bash -c "sh predict.sh"`: start the prediction command. It will load testing images from `workspace/inputs` and save the segmentation results to `workspace/outputs`

Assuming the team name is `baseline`, the Docker build command is

`docker build -t baseline . `

Test the docker to make sure it works. There should be segmentation results in the `baseline_outputs` folder.

```bash
docker container run --gpus "device=0" --name baseline --rm -v $PWD/CellSeg_Test/:/workspace/inputs/ -v $PWD/baseline_outputs/:/workspace/outputs/ baseline:latest /bin/bash -c "sh predict.sh"
```

> During the inference, please monitor the GPU memory consumption using `watch nvidia-smi`. The GPU memory consumption should be less than 1500MB. Otherwise, it will run into an OOM error on the official evaluation server. We impose this hard constraint on GPU memory consumption to ensure ease of use, because biologists may not have powerful GPUs in practice. Thus, the model should be low-resource.

### 3) Save Docker

`docker save baseline | gzip -c > baseline.tar.gz`

Upload the docker to Google drive or Baidu net disk and send the download link to `[email protected]`.

> Please **do not** upload the Docker to dockerhub!

## Limitations and potential improvements

The naive baseline's primary aim is to give participants out-of-the-box scripts that can generate successful submissions. Thus, there are many ways to surpass this baseline:

- New cell representation methods. In the baseline, we separated touching cells by simply removing their boundaries. More advanced cell representations could be used to address this issue, such as [stardist](https://github.com/stardist/stardist), [cellpose](https://github.com/MouseLand/cellpose), [omnipose](https://github.com/kevinjohncutler/omnipose), [deepcell](https://github.com/vanvalenlab/deepcell-tf), and so on.
- New architectures
- More data augmentations and the use of additional [public datasets](https://grand-challenge.org/forums/forum/weakly-supervised-cell-segmentation-in-multi-modality-microscopy-673/topic/official-external-datasets-thread-720/) or the set of unlabeled data provided.
- Well-designed training protocols
- Postprocessing

Nevertheless, please always keep in mind that many end users do not have powerful computation resources. It's important to consider the trade-off between resource consumption and accuracy.
PypiClean
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/beav2.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import numpy as np
import os
import sys

from observations.util import maybe_download_and_extract


def beav2(path):
    """Body Temperature Series of Beaver 2

    Reynolds (1994) describes a small part of a study of the long-term
    temperature dynamics of beaver *Castor canadensis* in north-central
    Wisconsin. Body temperature was measured by telemetry every 10 minutes
    for four females, but data from one period of less than a day for each
    of two animals is used there.

    The `beav2` data frame has 100 rows and 4 columns.

    This data frame contains the following columns:

    `day`
        Day of observation (in days since the beginning of 1990),
        November 3–4.

    `time`
        Time of observation, in the form `0330` for 3.30am.

    `temp`
        Measured body temperature in degrees Celsius.

    `activ`
        Indicator of activity outside the retreat.

    P. S. Reynolds (1994) Time-series analyses of beaver body temperatures.
    Chapter 11 of Lange, N., Ryan, L., Billard, L., Brillinger, D.,
    Conquest, L. and Greenhouse, J. eds (1994) *Case Studies in Biometry.*
    New York: John Wiley and Sons.

    Args:
        path: str.
            Path to directory which either stores file or otherwise file will
            be downloaded and extracted there.
            Filename is `beav2.csv`.

    Returns:
        Tuple of np.ndarray `x_train` with 100 rows and 4 columns and
        dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd
    path = os.path.expanduser(path)
    filename = 'beav2.csv'
    if not os.path.exists(os.path.join(path, filename)):
        url = 'http://dustintran.com/data/r/MASS/beav2.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='beav2.csv',
                                   resume=False)

    data = pd.read_csv(os.path.join(path, filename), index_col=0,
                       parse_dates=True)
    x_train = data.values
    metadata = {'columns': data.columns}
    return x_train, metadata
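
if __name__ == '__main__':
    # Hypothetical usage; the path is a placeholder cache directory.
    x_train, metadata = beav2('~/data')
    print(x_train.shape)              # (100, 4)
    print(list(metadata['columns']))  # ['day', 'time', 'temp', 'activ']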
PypiClean
/Flask-User-AWS-1.0.1.7.tar.gz/Flask-User-AWS-1.0.1.7/flask_user/password_manager.py
from __future__ import print_function

from flask import current_app
from passlib.context import CryptContext


class PasswordManager(object):
    """Hash and verify user passwords using passlib """

    def __init__(self, app):
        """
        Create a passlib CryptContext.

        Args:
            app(Flask): The Flask application instance; its ``user_manager``
                settings determine which passlib password hashes are used.
                Examples: ``'bcrypt', 'pbkdf2_sha512', 'sha512_crypt' or 'argon2'``.

        Example: ``password_manager = PasswordManager(app)``
        """
        self.app = app
        self.user_manager = app.user_manager

        # Create a passlib CryptContext
        self.password_crypt_context = CryptContext(
            schemes=self.user_manager.USER_PASSLIB_CRYPTCONTEXT_SCHEMES,
            **self.user_manager.USER_PASSLIB_CRYPTCONTEXT_KEYWORDS)

    def hash_password(self, password):
        """Hash plaintext ``password`` using the CryptContext configured in
        the constructor.

        Args:
            password(str): Plaintext password that the user types in.

        Returns:
            hashed password.

        Example:
            ``user.password = hash_password('mypassword')``
        """
        # Use passlib's CryptContext to hash a password
        password_hash = self.password_crypt_context.encrypt(password)

        return password_hash

    def verify_password(self, password, password_hash):
        """Verify plaintext ``password`` against ``hashed password``.

        Args:
            password(str): Plaintext password that the user types in.
            password_hash(str): Password hash generated by a previous call to ``hash_password()``.

        Returns:
            | True when ``password`` matches ``password_hash``.
            | False otherwise.

        Example:

        ::

            if verify_password('mypassword', user.password):
                login_user(user)
        """
        # Print deprecation warning if called with (password, user) instead of (password, user.password)
        if isinstance(password_hash, self.user_manager.db_manager.UserClass):
            print(
                'Deprecation warning: verify_password(password, user) has been changed'\
                ' to: verify_password(password, password_hash). The user param will be deprecated.'\
                ' Please change your call with verify_password(password, user) into'\
                ' a call with verify_password(password, user.password)'
                ' as soon as possible.')
            password_hash = password_hash.password    # effectively user.password

        # Use passlib's CryptContext to verify a password
        return self.password_crypt_context.verify(password, password_hash)
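
# Minimal round-trip sketch (kept as a comment; assumes a Flask `app` with a
# configured user_manager, which is not set up here):
#
#     pm = PasswordManager(app)
#     hashed = pm.hash_password('mypassword')
#     assert pm.verify_password('mypassword', hashed)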
PypiClean
/Lagranto-0.3.1.tar.gz/Lagranto-0.3.1/docs/tutorial_lagranto.rst
Tutorial
--------

The goal of this section is to show how to calculate trajectories, and how to analyze and plot them using the :ref:`lagranto-package`.

* `Calculation`_
* `Analyze`_
* `Plotting`_
* `Writing`_

Calculation
^^^^^^^^^^^

The :ref:`lagranto-package` provides a class, :class:`lagranto.LagrantoRun`, that wraps the Lagranto programs in Python. It allows trajectories to be calculated in parallel. You can take a look at the docstring to get familiar with the class.

Let's say that we want to calculate Warm Conveyor Belt trajectories for a 5-day period in June 2013. Using Era-Interim we can start trajectories every 6 hours, and we will calculate them for 48 hours forward in time. Since :class:`lagranto.LagrantoRun` needs a list of (startdate, enddate) pairs, we can build the dates as follows:

.. code-block:: python

    from datetime import datetime, timedelta
    from dypy.small_tools import interval

    startdate = datetime(2013, 6, 1, 0)
    enddate = startdate + timedelta(days=5)
    dates = [(d, d + timedelta(hours=48))
             for d in interval(startdate, enddate, timedelta(hours=6))]

If the Era-Interim data are in the `erainterim` folder and the output files should be written to the `output` folder, then the :class:`lagranto.LagrantoRun` can be initialized as follows:

.. code-block:: python

    from lagranto import LagrantoRun

    lrun = LagrantoRun(dates, workingdir='erainterim',
                       outputdir='output', version='ecmwf')

We want to start the trajectories every 20 km in the box [5E, 40E, 30N, 60N], so let's create a starting file:

.. code-block:: python

    specifier = "'box.eqd(5,20,40,50,20)@profile(850,500,10)@hPa'"
    out_create_startf = lrun.create_startf(startdate, specifier, tolist=True)

The `tolist` argument is needed if we want to use the same starting file for all starting times of the trajectories.

We can now calculate the trajectories, but let's first test our setup for a single date:

.. code-block:: python

    out_caltra = lrun.caltra(*dates[1])

We can also test tracing Q along the trajectories:

.. code-block:: python

    out_trace = lrun.trace(dates[1][0], field='Q 1.')

We can now calculate and trace the trajectories in parallel; for this we will use a tracevars file:

.. code-block:: python

    tracevars = """Q 1. 0 P
    U 1. 0 P
    """
    out = lrun.run_parallel(trace_kw={'tracevars_content': tracevars},
                            type='both')

The `tracevars_content` keyword argument is passed to trace to create a tracevars file with Q and U. The `type` keyword argument determines what is run in parallel; currently `both`, `trace`, and `caltra` are available.

Analyze
^^^^^^^

Now that we have calculated trajectories, let's read and analyze them. By default the file names are formatted as `lsl_{date:%Y%m%d%H}.4`. So if we want to read the trajectories of the last starting date, we can do as follows:

.. code-block:: python

    from lagranto import Tra

    filename_template = 'output/lsl_{date:%Y%m%d%H}.4'
    filename = filename_template.format(date=dates[-1][0])
    trajs = Tra()
    trajs.load_netcdf(filename)
    print(trajs)

We can now test if the trajectories fulfill the standard criterion for a WCB: an ascent greater than 500 hPa in 48 hours. To be clear, the goal of this example is not to replace the Fortran routines of the LAGRANTO package, but to illustrate the possibilities that Python provides to analyze trajectories using a simple example.

.. code-block:: python

    import numpy as np

    wcb_index = np.where((trajs['p'][:, :1] - trajs['p']) > 500)
    wcb_trajs = Tra()
    wcb_trajs.set_array(trajs[wcb_index[0], :])
    print(wcb_trajs)

Plotting
^^^^^^^^

Now that we have WCB trajectories, let's plot them on a map. We will use cartopy for this.

.. code-block:: python

    import cartopy.crs as ccrs
    import cartopy.feature as cfeature
    from lagranto.plotting import plot_trajs
    import matplotlib.pyplot as plt

    crs = ccrs.Stereographic(central_longitude=180 - 170,
                             central_latitude=90 - 43,
                             true_scale_latitude=90 - 43)
    fig = plt.figure()
    ax = plt.axes(projection=crs)
    land_50m = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries',
                                            '50m', edgecolor='gray',
                                            facecolor='none', linewidth=0)
    ax.add_feature(land_50m)
    ax.set_extent([-10, 28, 30, 60])
    plot_trajs(ax, wcb_trajs, 'p')
    # fig.savefig('wcb_trajs_{date:%Y%m%d_%H}.pdf'.format(date=dates[-1][0]),
    #             bbox_inches='tight')

.. image:: images/wcb_trajs_20130529_18.png

Writing
^^^^^^^

The WCB trajectories can also be written to disk as follows:

.. code-block:: python

    wcb_trajs.write_netcdf('output/wcb_trajs.nc')
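To verify the result, the file can be loaded back with the same `Tra` API used above (a minimal sketch; the path is the one chosen in the previous step):

.. code-block:: python

    check = Tra()
    check.load_netcdf('output/wcb_trajs.nc')
    print(check)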
PypiClean
/BIT_framework-0.0.2-py3-none-any.whl/BIT_DL/pytorch/modules/encoders/xlnet_encoder.py
import sys
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F

from BIT_DL.pytorch.modules.encoders.encoder_base import EncoderBase
from BIT_DL.pytorch.modules.pretrained.xlnet import PretrainedXLNetMixin
from BIT_DL.pytorch.modules.pretrained.xlnet_utils import (
    PositionWiseFF, RelativeMultiheadAttention, RelativePositionalEncoding,
    params_except_in)
from BIT_DL.pytorch.utils.utils import dict_fetch, sum_tensors

__all__ = [
    "XLNetEncoder",
]


class XLNetEncoder(EncoderBase, PretrainedXLNetMixin):
    r"""Raw XLNet module for encoding sequences. Please see
    :class:`~BIT_DL.pytorch.modules.PretrainedXLNetMixin` for a brief
    description of XLNet.

    Args:
        pretrained_model_name (optional): a `str`, the name of the
            pre-trained model (e.g., ``xlnet-base-cased``). Please refer to
            :class:`~BIT_DL.pytorch.modules.PretrainedXLNetMixin` for
            all supported models. If `None`, the model name in
            :attr:`hparams` is used.
        cache_dir (optional): the path to a folder in which the
            pre-trained models will be cached. If `None` (default),
            a default directory (``texar_data`` folder under user's home
            directory) will be used.
        hparams (dict or HParams, optional): Hyperparameters. Missing
            hyperparameters will be set to default values. See
            :meth:`default_hparams` for the hyperparameter structure
            and default values.
    """
    _IS_DECODE = False

    def __init__(self,
                 pretrained_model_name: Optional[str] = None,
                 cache_dir: Optional[str] = None,
                 hparams=None):
        super().__init__(hparams=hparams)
        self.load_pretrained_config(pretrained_model_name, cache_dir)

        num_layers = self._hparams.num_layers
        num_heads = self._hparams.num_heads
        head_dim = self._hparams.head_dim

        self.word_embed = nn.Embedding(self._hparams.vocab_size,
                                       self._hparams.hidden_dim)
        self.pos_embed = RelativePositionalEncoding(
            hparams={
                "dim": self._hparams.hidden_dim,
                "max_seq_len": self._hparams.max_seq_length,
            })
        self.dropout = nn.Dropout(self._hparams.dropout)

        self.r_r_bias = None
        self.r_w_bias = None
        self.r_s_bias = None

        if not self._hparams.untie_r:
            self.r_r_bias = nn.Parameter(torch.Tensor(num_heads, head_dim))
            self.r_w_bias = nn.Parameter(torch.Tensor(num_heads, head_dim))
            self.r_s_bias = (nn.Parameter(torch.Tensor(num_heads, head_dim))
                             if self._hparams.use_segments else None)

        self.attn_layers = nn.ModuleList()
        self.ff_layers = nn.ModuleList()
        rel_attn_hparams = dict_fetch(
            self._hparams, RelativeMultiheadAttention.default_hparams())
        ff_hparams = dict_fetch(
            self._hparams, PositionWiseFF.default_hparams())
        for _ in range(num_layers):
            self.attn_layers.append(RelativeMultiheadAttention(
                self.r_r_bias, self.r_w_bias, self.r_s_bias,
                hparams=rel_attn_hparams))
            self.ff_layers.append(PositionWiseFF(hparams=ff_hparams))

        self.mask_emb = nn.Parameter(
            torch.Tensor(1, 1, self._hparams.hidden_dim))

        if self._IS_DECODE:
            self.lm_bias = nn.Parameter(torch.zeros(self._hparams.vocab_size))

        self.init_pretrained_weights()

    @staticmethod
    def default_hparams() -> Dict[str, Any]:
        r"""Returns a dictionary of hyperparameters with default values.

        * The encoder arch is determined by the constructor argument
          :attr:`pretrained_model_name` if it's specified. In this case,
          `hparams` are ignored.
        * Otherwise, the encoder arch is determined by
          `hparams['pretrained_model_name']` if it's specified. All other
          configurations in `hparams` are ignored.
        * If the above two are `None`, the encoder arch is defined by the
          configurations in `hparams` and weights are randomly initialized.

        .. code-block:: python

            {
                "pretrained_model_name": "xlnet-base-cased",
                "untie_r": True,
                "num_layers": 12,
                "mem_len": 0,
                "reuse_len": 0,
                "num_heads": 12,
                "hidden_dim": 768,
                "head_dim": 64,
                "dropout": 0.1,
                "attention_dropout": 0.1,
                "use_segments": True,
                "ffn_inner_dim": 3072,
                "activation": 'gelu',
                "vocab_size": 32000,
                "max_seq_length": 512,
                "initializer": None,
                "name": "xlnet_encoder",
            }

        Here:

        The default parameters are values for the cased XLNet-Base model.

        `"pretrained_model_name"`: str or None
            The name of the pre-trained XLNet model. If None, the model
            will be randomly initialized.

        `"untie_r"`: bool
            Whether to untie the biases in attention.

        `"num_layers"`: int
            The number of stacked layers.

        `"mem_len"`: int
            The number of tokens to cache.

        `"reuse_len"`: int
            The number of tokens in the current batch to be cached and
            reused in the future.

        `"num_heads"`: int
            The number of attention heads.

        `"hidden_dim"`: int
            The hidden size.

        `"head_dim"`: int
            The dimension size of each attention head.

        `"dropout"`: float
            Dropout rate.

        `"attention_dropout"`: float
            Dropout rate on attention probabilities.

        `"use_segments"`: bool
            Whether to use segment embedding.

        `"ffn_inner_dim"`: int
            The hidden size in feed-forward layers.

        `"activation"`: str
            `relu` or `gelu`.

        `"vocab_size"`: int
            The vocabulary size.

        `"max_seq_length"`: int
            The maximum sequence length for `RelativePositionalEncoding`.

        `"initializer"`: dict, optional
            Hyperparameters of the default initializer that initializes
            variables created in this module.
            See :func:`~BIT_DL.pytorch.core.get_initializer` for details.

        `"name"`: str
            Name of the module.
        """
        return {
            'pretrained_model_name': 'xlnet-base-cased',
            'untie_r': True,
            'num_layers': 12,
            'mem_len': 0,
            'reuse_len': 0,
            # layer
            'num_heads': 12,
            'hidden_dim': 768,
            'head_dim': 64,
            'dropout': 0.1,
            'attention_dropout': 0.1,
            'use_segments': True,
            # ffn
            'ffn_inner_dim': 3072,
            'activation': 'gelu',
            # embedding
            'vocab_size': 32000,
            'max_seq_length': 512,
            'initializer': None,
            'name': "xlnet_encoder",
            '@no_typecheck': ['pretrained_model_name'],
        }

    def param_groups(self,
                     lr: Optional[float] = None,
                     lr_layer_scale: float = 1.0,
                     decay_base_params: bool = False):
        r"""Create parameter groups for optimizers. When
        :attr:`lr_layer_scale` is not 1.0, parameters from each layer form
        separate groups with different base learning rates.

        The return value of this method can be used in the constructor of
        optimizers, for example:

        .. code-block:: python

            model = XLNetEncoder(...)
            param_groups = model.param_groups(lr=2e-5, lr_layer_scale=0.8)
            optim = torch.optim.Adam(param_groups)

        Args:
            lr (float): The learning rate. Can be omitted if
                :attr:`lr_layer_scale` is 1.0.
            lr_layer_scale (float): Per-layer LR scaling rate. The `i`-th
                layer will be scaled by
                `lr_layer_scale ^ (num_layers - i - 1)`.
            decay_base_params (bool): If `True`, treat non-layer parameters
                (e.g. embeddings) as if they're in layer 0. If `False`,
                these parameters are not scaled.

        Returns:
            The parameter groups, used as the first argument for optimizers.
        """
        if lr_layer_scale != 1.0:
            if lr is None:
                raise ValueError(
                    "lr must be specified when lr_layer_scale is not 1.0")

            num_layers = self._hparams.num_layers
            base_group = {
                "params": params_except_in(
                    self, ['attn_layers', 'ff_layers']),
                "lr": lr * (lr_layer_scale ** num_layers
                            if decay_base_params else 1.0)
            }
            param_groups = [base_group]
            for idx in range(num_layers):
                decay_rate = lr_layer_scale ** (num_layers - idx - 1)
                param_group = {
                    "params": [*self.attn_layers[idx].parameters(),
                               *self.ff_layers[idx].parameters()],
                    "lr": lr * decay_rate,
                }
                param_groups.append(param_group)
            return param_groups
        return self.parameters()

    @property
    def output_size(self):
        r"""The feature size of :meth:`forward` output."""
        return self._hparams.hidden_dim

    @staticmethod
    def _cache_mem(output: torch.Tensor,
                   prev_mem: Optional[torch.Tensor],
                   mem_len: int,
                   reuse_len: int = 0) -> torch.Tensor:
        r"""Cache hidden states into memory."""
        assert mem_len > 0

        if reuse_len is not None and reuse_len > 0:
            output = output[:reuse_len]
        if prev_mem is None:
            new_mem = output[-mem_len:]
        else:
            new_mem = torch.cat([prev_mem, output], dim=0)[-mem_len:]
        return new_mem.detach()

    def _create_causal_attn_mask(self,
                                 seq_len: int,
                                 mem_len: int,
                                 same_length: bool = False) -> torch.Tensor:
        r"""Create causal attention mask of shape
        `(seq_len, mem_len + seq_len)`.
        """
        assert self.r_w_bias is not None
        device = self.r_w_bias.device
        attn_mask = torch.ones(seq_len, seq_len, device=device)
        mask_u = torch.triu(attn_mask, diagonal=1)
        attn_mask_pad = torch.zeros(seq_len, mem_len, device=device)
        ret = torch.cat([attn_mask_pad, mask_u], dim=1)
        if same_length:
            mask_l = torch.tril(attn_mask, diagonal=-1)
            ret = torch.cat([ret[:, :seq_len] + mask_l, ret[:, seq_len:]], 1)
        return ret

    def forward(self,  # type: ignore
                inputs: Union[torch.Tensor, torch.LongTensor],
                segment_ids: Optional[torch.LongTensor] = None,
                input_mask: Optional[torch.Tensor] = None,
                memory: Optional[List[torch.Tensor]] = None,
                permute_mask: Optional[torch.Tensor] = None,
                target_mapping: Optional[torch.Tensor] = None,
                bi_data: bool = False,
                clamp_len: Optional[int] = None,
                cache_len: int = 0,
                same_length: bool = False,
                attn_type: str = 'bi',
                two_stream: bool = False) \
            -> Tuple[torch.Tensor, Optional[List[torch.Tensor]]]:
        r"""Compute XLNet representations for the input.

        Args:
            inputs: Either a **2D Tensor** of shape `[batch_size, max_time]`,
                containing the ids of tokens in input sequences, or
                a **3D Tensor** of shape `[batch_size, max_time, vocab_size]`,
                containing soft token ids (i.e., weights or probabilities)
                used to mix the embedding vectors.
            segment_ids: Shape `[batch_size, max_time]`.
            input_mask: Float tensor of shape `[batch_size, max_time]`. Note
                that positions with value 1 are masked out.
            memory: Memory from previous batches. A list of length
                `num_layers`, each tensor of shape
                `[batch_size, mem_len, hidden_dim]`.
            permute_mask: The permutation mask. Float tensor of shape
                `[batch_size, max_time, max_time]`.
                A value of 0 for ``permute_mask[i, j, k]`` indicates that
                position `i` attends to position `j` in batch `k`.
            target_mapping: The target token mapping. Float tensor of shape
                `[batch_size, num_targets, max_time]`.
                A value of 1 for ``target_mapping[i, j, k]`` indicates that
                the `i`-th target token (in order of permutation) in batch
                `k` is the token at position `j`.
                Each row ``target_mapping[i, :, k]`` can have no more than
                one value of 1.
            bi_data (bool): Whether to use bidirectional data input pipeline.
            clamp_len (int): Clamp all relative distances larger than
                :attr:`clamp_len`. A value of -1 means no clamping.
            cache_len (int): Length of memory (number of tokens) to cache.
            same_length (bool): Whether to use the same attention length
                for each token.
            attn_type (str): Attention type. Supported values are `"uni"`
                and `"bi"`.
            two_stream (bool): Whether to use two-stream attention. Only set
                to `True` when pre-training or generating text. Defaults to
                `False`.

        :returns: A tuple of `(output, new_memory)`:

            - **`output`**: The final layer output representations. Shape
              `[batch_size, max_time, hidden_dim]`.
            - **`new_memory`**: The memory of the current batch. If
              `cache_len` is 0, then `new_memory` is `None`. Otherwise, it is
              a list of length `num_layers`, each tensor of shape
              `[batch_size, cache_len, hidden_dim]`. This can be used as the
              :attr:`memory` argument in the next batch.
        """
        if inputs.dim() == 2:
            word_embeds = self.word_embed(inputs)
        elif inputs.dim() == 3:
            word_embeds = torch.tensordot(inputs, self.word_embed.weight,
                                          dims=([-1], [0]))
        else:
            raise ValueError("'inputs' should be a 2D or 3D tensor.")

        return self._forward(word_embed=word_embeds,
                             segment_ids=segment_ids,
                             input_mask=input_mask,
                             memory=memory,
                             permute_mask=permute_mask,
                             target_mapping=target_mapping,
                             bi_data=bi_data,
                             clamp_len=clamp_len,
                             cache_len=cache_len,
                             same_length=same_length,
                             attn_type=attn_type,
                             two_stream=two_stream)

    def _forward(self,
                 word_embed: torch.Tensor,
                 segment_ids: Optional[torch.LongTensor] = None,
                 input_mask: Optional[torch.Tensor] = None,
                 memory: Optional[List[torch.Tensor]] = None,
                 permute_mask: Optional[torch.Tensor] = None,
                 target_mapping: Optional[torch.Tensor] = None,
                 bi_data: bool = False,
                 clamp_len: Optional[int] = None,
                 cache_len: int = 0,
                 same_length: bool = False,
                 attn_type: str = 'bi',
                 two_stream: bool = False) \
            -> Tuple[torch.Tensor, Optional[List[torch.Tensor]]]:
        r"""Compute XLNet representations for the input. This layer exists
        because :class:`XLNetDecoder` computes embeddings in the decoder
        helper.

        `word_embed` has shape `[batch_size, max_time, word_embed_dim]`.
        Please refer to :meth:`forward` for the detailed information of
        other arguments.
        """
        # seq_len == max_time
        # word_embed: [seq_len, batch_size, word_embed_dim]
        word_embed = word_embed.permute(1, 0, 2)
        # segment_ids: [seq_len, batch_size]
        if segment_ids is not None:
            segment_ids = segment_ids.permute(1, 0)
        # input_mask: [seq_len, batch_size]
        if input_mask is not None:
            input_mask = input_mask.permute(1, 0)
        # memory: A list of length num_layers
        # each tensor of shape [mem_len, batch_size, hidden_dim]
        if memory is not None:
            memory = [m.permute(1, 0, 2) for m in memory]
        # permute_mask: [seq_len, seq_len, batch_size]
        if permute_mask is not None:
            permute_mask = permute_mask.permute(1, 2, 0)
        # target_mapping: [num_targets, seq_len, batch_size]
        if target_mapping is not None:
            target_mapping = target_mapping.permute(1, 2, 0)

        seq_len, batch_size = word_embed.size()[:2]
        mem_len = memory[0].size(0) if memory is not None else 0
        tot_len = seq_len + mem_len
        reuse_len = self._hparams.reuse_len

        # Construct masks.
        masks: List[Optional[torch.Tensor]] = []

        # Causal attention mask.
        if attn_type == 'uni':
            causal_mask = self._create_causal_attn_mask(
                seq_len, mem_len, same_length)
            # attn_mask: (seq_len, tot_len, 1, 1)
            causal_mask = causal_mask.unsqueeze(2).unsqueeze(3)
            masks.append(causal_mask)
        elif attn_type == 'bi':
            pass
        else:
            raise ValueError(f"Unsupported attention type: {attn_type}")

        # Data mask: input mask & permutation mask.
        if input_mask is not None:
            input_mask = input_mask.expand(seq_len, -1, -1)
        data_mask = sum_tensors([input_mask, permute_mask])
        if data_mask is not None:
            # All positions in memory can be attended to.
            memory_mask = data_mask.new_zeros(seq_len, mem_len, batch_size)
            # data_mask: (seq_len, tot_len, batch_size, 1)
            data_mask = torch.cat([memory_mask, data_mask],
                                  dim=1).unsqueeze(3)
            masks.append(data_mask)

        # Exclude the main diagonal (target tokens) from the mask.
        attn_mask = sum_tensors(masks)
        if attn_mask is None:
            final_mask = None
        else:
            attn_mask = (attn_mask > 0)
            final_mask = -torch.eye(seq_len, device=attn_mask.device)
            final_mask = torch.cat([
                final_mask.new_zeros(seq_len, mem_len), final_mask], dim=-1)
            final_mask = final_mask.unsqueeze(2).unsqueeze(3)
            # final_mask: (seq_len, tot_len, batch_size, 1)
            final_mask = ((attn_mask.float() + final_mask) > 0)

        # Construct segment embedding.
        if segment_ids is not None:
            concat_segment_ids = torch.cat([
                segment_ids.new_zeros(mem_len, batch_size), segment_ids])
            segment_matrix = (segment_ids.unsqueeze(1) !=
                              concat_segment_ids.unsqueeze(0)).long()
            segment_matrix = F.one_hot(segment_matrix, num_classes=2).float()
        else:
            segment_matrix = None

        pos_embed = self.pos_embed(
            batch_size, seq_len, tot_len, clamp_len, attn_type, bi_data)
        pos_embed = self.dropout(pos_embed)

        states_h = self.dropout(word_embed)
        states_g = None
        if two_stream:
            if target_mapping is not None:
                word_embed_q = self.mask_emb.expand(
                    target_mapping.size(0), batch_size, -1)
            else:
                word_embed_q = word_embed
            states_g = self.dropout(word_embed_q)
        new_memory = []

        for idx in range(self._hparams.num_layers):
            cur_memory = memory[idx] if memory is not None else None
            if cache_len > 0:
                new_memory.append(self._cache_mem(
                    states_h, cur_memory, cache_len, reuse_len))
            attn_layer: RelativeMultiheadAttention
            attn_layer = self.attn_layers[idx]  # type: ignore
            states_h, states_g = attn_layer(
                states_h=states_h, states_g=states_g,
                pos_embed=pos_embed, segment_mat=segment_matrix,
                attn_mask_h=final_mask, attn_mask_g=attn_mask,
                target_mapping=target_mapping, memory=cur_memory)
            states_h = self.ff_layers[idx](states_h)
            if states_g is not None:
                states_g = self.ff_layers[idx](states_g)

        output = self.dropout(states_h if states_g is None else states_g)

        # Now output: [seq_len, batch_size, hidden_dim]
        # new_memory: None or a list of length num_layers,
        # each tensor of shape [cache_len, batch_size, hidden_dim]
        output = output.permute(1, 0, 2)
        if new_memory is not None:
            new_memory = [m.permute(1, 0, 2) for m in new_memory]

        if cache_len == 0:
            return output, None

        return output, new_memory
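# Usage sketch (not part of the original module): constructing a small,
# randomly initialized encoder and running it on a batch of token ids.
# Shapes follow the `forward` docstring above. This assumes that passing
# `pretrained_model_name=None` both as the argument and in `hparams` skips
# any checkpoint download; the tiny hyperparameter values are illustrative.
#
#     import torch
#
#     encoder = XLNetEncoder(
#         pretrained_model_name=None,
#         hparams={"pretrained_model_name": None, "num_layers": 2,
#                  "num_heads": 4, "hidden_dim": 64, "head_dim": 16,
#                  "ffn_inner_dim": 128, "vocab_size": 100})
#     inputs = torch.randint(100, (2, 7))   # [batch_size, max_time]
#     output, new_memory = encoder(inputs)
#     print(output.shape)                   # expected: torch.Size([2, 7, 64])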
PypiClean
/CI_CloudConnector-0.60.zip/CI_CloudConnector-0.60/README.txt
Cloud Connector Python
-----------------------------
Created by Idop - 21.6.2016

Reads Modbus TCP \ EtherNet/IP \ Simulation tags and sends them to a web
service using OAuth authentication.
Runs on Python 2.7.9; tested on a Raspberry Pi 3 running Debian Jessie.

How to Operate:
------------------------------
After installing all components correctly you can run

    sudo python CI_CloudConnector.py help

This will show all operating options.
After testing is done you can operate the application in production mode using

    sudo python CI_CloudConnector.py MainLoop

Install Instructions
------------------------------
The project is uploaded to the open source directory "PIP".
Run the next command to download and install the module:

    sudo pip install CI_CloudConnector

Troubleshooting
------------------------
ERROR: Python.h is missing
Solution: run
    sudo apt-get install python-dev

ERROR: error message when upgrading: 'module object has no attribute _handlerList'
Solution: run 'sudo pip uninstall logging'

Running the application
-------------------------------
Run the module from where it was deployed with the help option to see
instructions.
Important: on the first run a config file will be created in /CI_LC/config.ini;
you must edit this file with server definitions.

    sudo python CI_LocalConnector.py help

* Dependencies should be installed automatically
  (required modules: pip, pymodbus, cpppo)

Running the application at startup (Linux)
------------------------------------------------
Edit the startup script:

    sudo nano /etc/init.d/idopStartUp.sh
    sudo chmod 755 /etc/init.d/idopStartUp.sh
    sudo update-rc.d idopStartUp.sh defaults

To remove it from startup:

    sudo update-rc.d -f idopStartUp.sh remove

Inside this file we just call another shell script.

idopStartUp.sh examples
-------------
    #! /bin/sh
    # /etc/init.d/idopStartUp.sh
    bash /home/pi/CI_Projects/launcher.sh &

    #!/bin/sh
    # launcher.sh
    # navigate home, then to this directory, then back home
    cd /
    cd /home/pi/CI_Projects
    export PATH="$PATH:/usr/lib/python2.7:/usr/lib/python2.7/plat-arm-linux-gnueabihf:/usr/lib/python2.7/lib-tk:/usr/lib/python2.7/lib-old:/usr/lib/python2.7/lib-dynload:/home/pi/.local/lib/python2.7/site-packages:/usr/local/lib/python2.7/dist-packages:/usr/lib/python2.7/dist-packages:/usr/lib/python2.7/dist-packages/PILcompat:/usr/lib/python2.7/dist-packages/gtk-2.0:/usr/lib/pymodules/python2.7"
    sudo python CI_LocalConnector.py MainLoop
    cd /
PypiClean
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/extensions/tracing/datadog.py
from __future__ import annotations

import hashlib
from inspect import isawaitable
from typing import TYPE_CHECKING, Any, Generator, Iterator, Optional

from ddtrace import tracer

from strawberry.extensions import SchemaExtension
from strawberry.extensions.tracing.utils import should_skip_tracing
from strawberry.utils.cached_property import cached_property

if TYPE_CHECKING:
    from strawberry.types.execution import ExecutionContext


class DatadogTracingExtension(SchemaExtension):
    def __init__(
        self,
        *,
        execution_context: Optional[ExecutionContext] = None,
    ):
        if execution_context:
            self.execution_context = execution_context

    @cached_property
    def _resource_name(self):
        assert self.execution_context.query

        query_hash = self.hash_query(self.execution_context.query)

        if self.execution_context.operation_name:
            return f"{self.execution_context.operation_name}:{query_hash}"

        return query_hash

    def hash_query(self, query: str) -> str:
        return hashlib.md5(query.encode("utf-8")).hexdigest()

    def on_operation(self) -> Iterator[None]:
        self._operation_name = self.execution_context.operation_name
        span_name = (
            f"{self._operation_name}" if self._operation_name else "Anonymous Query"
        )

        self.request_span = tracer.trace(
            span_name,
            resource=self._resource_name,
            span_type="graphql",
            service="strawberry",
        )
        self.request_span.set_tag("graphql.operation_name", self._operation_name)

        operation_type = "query"

        assert self.execution_context.query

        if self.execution_context.query.strip().startswith("mutation"):
            operation_type = "mutation"
        if self.execution_context.query.strip().startswith("subscription"):
            operation_type = "subscription"

        self.request_span.set_tag("graphql.operation_type", operation_type)

        yield

        self.request_span.finish()

    def on_validate(self) -> Generator[None, None, None]:
        self.validation_span = tracer.trace("Validation", span_type="graphql")
        yield
        self.validation_span.finish()

    def on_parse(self) -> Generator[None, None, None]:
        self.parsing_span = tracer.trace("Parsing", span_type="graphql")
        yield
        self.parsing_span.finish()

    async def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        if should_skip_tracing(_next, info):
            result = _next(root, info, *args, **kwargs)

            if isawaitable(result):  # pragma: no cover
                result = await result

            return result

        field_path = f"{info.parent_type}.{info.field_name}"

        with tracer.trace(f"Resolving: {field_path}", span_type="graphql") as span:
            span.set_tag("graphql.field_name", info.field_name)
            span.set_tag("graphql.parent_type", info.parent_type.name)
            span.set_tag("graphql.field_path", field_path)
            span.set_tag("graphql.path", ".".join(map(str, info.path.as_list())))

            result = _next(root, info, *args, **kwargs)

            if isawaitable(result):
                result = await result

            return result


class DatadogTracingExtensionSync(DatadogTracingExtension):
    def resolve(self, _next, root, info, *args, **kwargs) -> Any:
        if should_skip_tracing(_next, info):
            return _next(root, info, *args, **kwargs)

        field_path = f"{info.parent_type}.{info.field_name}"

        with tracer.trace(f"Resolving: {field_path}", span_type="graphql") as span:
            span.set_tag("graphql.field_name", info.field_name)
            span.set_tag("graphql.parent_type", info.parent_type.name)
            span.set_tag("graphql.field_path", field_path)
            span.set_tag("graphql.path", ".".join(map(str, info.path.as_list())))

            return _next(root, info, *args, **kwargs)
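# Usage sketch (not part of the original module): enabling the extension on a
# schema. The `Query` type below is a hypothetical example; passing extension
# classes via `strawberry.Schema(..., extensions=[...])` is the standard way
# to activate schema extensions.
#
#     import strawberry
#
#     @strawberry.type
#     class Query:
#         @strawberry.field
#         def ping(self) -> str:
#             return "pong"
#
#     schema = strawberry.Schema(
#         query=Query,
#         extensions=[DatadogTracingExtension],  # or DatadogTracingExtensionSync
#     )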
PypiClean
/Arachne-0.5.0.tar.gz/Arachne-0.5.0/CHANGES.md
Changelog
=========

Here you will find the full list of changes between each Arachne release.

Version 0.5.0 (Nov 20th 2016)
-----------------------------
- Add support for Scrapy 1.0 ([#4](https://github.com/kirankoduru/arachne/issues/4))

Version 0.4.0 (Mar 17th 2016)
-----------------------------
- Renamed the `/spiders` endpoint to `/` to be more intuitive ([#8](https://github.com/kirankoduru/arachne/issues/8))
- The `/run-spider` endpoint returns the name of the spider and its status as running

Version 0.3.1 (Nov 25th 2015)
-----------------------------
- [BUG FIX] Whoops! Forgot to test whether individual spider `scrapy_settings` were available

Version 0.3.0 (Nov 23rd 2015)
-----------------------------
- Add individual spider settings to the `scrapy_settings` variable
- Add global spider settings to the `SCRAPY_SETTINGS` variable

Version 0.2.0 (Nov 15th 2015)
-----------------------------
- Export to CSV and JSON pipelines now available

Version 0.1.0 (Nov 14th 2015)
-----------------------------
- First public preview release
PypiClean
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/widgets/data/owsql.py
import sys
from collections import OrderedDict

from AnyQt.QtWidgets import (
    QLineEdit, QComboBox, QTextEdit, QMessageBox, QSizePolicy, QApplication)
from AnyQt.QtGui import QCursor
from AnyQt.QtCore import Qt, QTimer

from Orange.canvas import report
from Orange.data import Table
from Orange.data.sql.backend import Backend
from Orange.data.sql.backend.base import BackendError
from Orange.data.sql.table import SqlTable, LARGE_TABLE, AUTO_DL_LIMIT
from Orange.widgets import gui
from Orange.widgets.credentials import CredentialManager
from Orange.widgets.settings import Setting
from Orange.widgets.utils.itemmodels import PyListModel
from Orange.widgets.widget import OWWidget, Output, Msg

MAX_DL_LIMIT = 1000000


class TableModel(PyListModel):
    def data(self, index, role=Qt.DisplayRole):
        row = index.row()
        if role == Qt.DisplayRole:
            return str(self[row])
        return super().data(index, role)


class BackendModel(PyListModel):
    def data(self, index, role=Qt.DisplayRole):
        row = index.row()
        if role == Qt.DisplayRole:
            return self[row].display_name
        return super().data(index, role)


class OWSql(OWWidget):
    name = "SQL Table"
    id = "orange.widgets.data.sql"
    description = "Load a dataset from SQL."
    icon = "icons/SQLTable.svg"
    priority = 30
    category = "Data"
    keywords = ["data", "file", "load", "read", "SQL"]

    class Outputs:
        data = Output("Data", Table,
                      doc="Attribute-valued dataset read from the input file.")

    settings_version = 2

    want_main_area = False
    resizing_enabled = False

    host = Setting(None)
    port = Setting(None)
    database = Setting(None)
    schema = Setting(None)
    username = ""
    password = ""
    table = Setting(None)
    sql = Setting("")
    guess_values = Setting(True)
    download = Setting(False)

    materialize = Setting(False)
    materialize_table_name = Setting("")

    class Information(OWWidget.Information):
        data_sampled = Msg("Data description was generated from a sample.")

    class Error(OWWidget.Error):
        connection = Msg("{}")
        no_backends = Msg("Please install a backend to use this widget")
        missing_extension = Msg("Database is missing extension{}: {}")

    def __init__(self):
        super().__init__()

        self.backend = None
        self.data_desc_table = None
        self.database_desc = None

        vbox = gui.vBox(self.controlArea, "Server", addSpace=True)
        box = gui.vBox(vbox)

        self.backends = BackendModel(Backend.available_backends())
        self.backendcombo = QComboBox(box)
        if len(self.backends):
            self.backendcombo.setModel(self.backends)
        else:
            self.Error.no_backends()
            box.setEnabled(False)
        box.layout().addWidget(self.backendcombo)

        self.servertext = QLineEdit(box)
        self.servertext.setPlaceholderText('Server')
        self.servertext.setToolTip('Server')
        self.servertext.editingFinished.connect(self._load_credentials)
        if self.host:
            self.servertext.setText(self.host if not self.port else
                                    '{}:{}'.format(self.host, self.port))
        box.layout().addWidget(self.servertext)

        self.databasetext = QLineEdit(box)
        self.databasetext.setPlaceholderText('Database[/Schema]')
        self.databasetext.setToolTip('Database or optionally Database/Schema')
        if self.database:
            self.databasetext.setText(
                self.database if not self.schema else
                '{}/{}'.format(self.database, self.schema))
        box.layout().addWidget(self.databasetext)

        self.usernametext = QLineEdit(box)
        self.usernametext.setPlaceholderText('Username')
        self.usernametext.setToolTip('Username')
        box.layout().addWidget(self.usernametext)

        self.passwordtext = QLineEdit(box)
        self.passwordtext.setPlaceholderText('Password')
        self.passwordtext.setToolTip('Password')
        self.passwordtext.setEchoMode(QLineEdit.Password)
        box.layout().addWidget(self.passwordtext)

        self._load_credentials()

        self.tables = TableModel()
        tables = gui.hBox(box)
        self.tablecombo = QComboBox(
            minimumContentsLength=35,
            sizeAdjustPolicy=QComboBox.AdjustToMinimumContentsLength
        )
        self.tablecombo.setModel(self.tables)
        self.tablecombo.setToolTip('table')
        tables.layout().addWidget(self.tablecombo)
        self.connect()

        index = self.tablecombo.findText(str(self.table))
        if index != -1:
            self.tablecombo.setCurrentIndex(index)
        # set up the callback to select_table in case of selection change
        self.tablecombo.activated[int].connect(self.select_table)

        self.connectbutton = gui.button(
            tables, self, '↻', callback=self.connect)
        self.connectbutton.setSizePolicy(
            QSizePolicy.Fixed, QSizePolicy.Fixed)
        tables.layout().addWidget(self.connectbutton)

        self.custom_sql = gui.vBox(box)
        self.custom_sql.setVisible(False)
        self.sqltext = QTextEdit(self.custom_sql)
        self.sqltext.setPlainText(self.sql)
        self.custom_sql.layout().addWidget(self.sqltext)

        mt = gui.hBox(self.custom_sql)
        cb = gui.checkBox(mt, self, 'materialize', 'Materialize to table ')
        cb.setToolTip('Save results of the query in a table')
        le = gui.lineEdit(mt, self, 'materialize_table_name')
        le.setToolTip('Save results of the query in a table')

        self.executebtn = gui.button(
            self.custom_sql, self, 'Execute', callback=self.open_table)

        box.layout().addWidget(self.custom_sql)

        gui.checkBox(box, self, "guess_values",
                     "Auto-discover categorical variables",
                     callback=self.open_table)

        gui.checkBox(box, self, "download",
                     "Download data to local memory",
                     callback=self.open_table)

        gui.rubber(self.buttonsArea)

        QTimer.singleShot(0, self.select_table)

    def _load_credentials(self):
        self._parse_host_port()
        cm = self._credential_manager(self.host, self.port)
        self.username = cm.username
        self.password = cm.password

        if self.username:
            self.usernametext.setText(self.username)
        if self.password:
            self.passwordtext.setText(self.password)

    def _save_credentials(self):
        cm = self._credential_manager(self.host, self.port)
        cm.username = self.username or ''
        cm.password = self.password or ''

    def _credential_manager(self, host, port):
        return CredentialManager("SQL Table: {}:{}".format(host, port))

    def error(self, id=0, text=""):
        super().error(id, text)
        err_style = 'QLineEdit {border: 2px solid red;}'
        if 'server' in text or 'host' in text:
            self.servertext.setStyleSheet(err_style)
        else:
            self.servertext.setStyleSheet('')
        if 'role' in text:
            self.usernametext.setStyleSheet(err_style)
        else:
            self.usernametext.setStyleSheet('')
        if 'database' in text:
            self.databasetext.setStyleSheet(err_style)
        else:
            self.databasetext.setStyleSheet('')

    def _parse_host_port(self):
        hostport = self.servertext.text().split(':')
        self.host = hostport[0]
        self.port = hostport[1] if len(hostport) == 2 else None

    def connect(self):
        self._parse_host_port()
        self.database, _, self.schema = \
            self.databasetext.text().partition('/')
        self.username = self.usernametext.text() or None
        self.password = self.passwordtext.text() or None
        try:
            if self.backendcombo.currentIndex() < 0:
                return
            backend = self.backends[self.backendcombo.currentIndex()]
            self.backend = backend(dict(
                host=self.host,
                port=self.port,
                database=self.database,
                user=self.username,
                password=self.password
            ))
            self.Error.connection.clear()
            self._save_credentials()
            self.database_desc = OrderedDict((
                ("Host", self.host), ("Port", self.port),
                ("Database", self.database), ("User name", self.username)
            ))
            self.refresh_tables()
        except BackendError as err:
            error = str(err).split('\n')[0]
            self.Error.connection(error)
            self.database_desc = self.data_desc_table = None
            self.tablecombo.clear()

    def refresh_tables(self):
        self.tables.clear()
        self.Error.missing_extension.clear()
        if self.backend is None:
            self.data_desc_table = None
            return

        self.tables.append("Select a table")
        self.tables.append("Custom SQL")
        self.tables.extend(self.backend.list_tables(self.schema))

    # Called on tablecombo selection change:
    def select_table(self):
        curIdx = self.tablecombo.currentIndex()
        if self.tablecombo.itemText(curIdx) != "Custom SQL":
            self.custom_sql.setVisible(False)
            return self.open_table()
        else:
            self.custom_sql.setVisible(True)
            self.data_desc_table = None
            self.database_desc["Table"] = "(None)"
            self.table = None
            if len(str(self.sql)) > 14:
                return self.open_table()

        # self.Error.missing_extension(
        #     's' if len(missing) > 1 else '',
        #     ', '.join(missing),
        #     shown=missing)

    def open_table(self):
        table = self.get_table()
        self.data_desc_table = table
        self.Outputs.data.send(table)

    def get_table(self):
        curIdx = self.tablecombo.currentIndex()
        if curIdx <= 0:
            if self.database_desc:
                self.database_desc["Table"] = "(None)"
            self.data_desc_table = None
            return

        if self.tablecombo.itemText(curIdx) != "Custom SQL":
            self.table = self.tables[self.tablecombo.currentIndex()]
            self.database_desc["Table"] = self.table
            if "Query" in self.database_desc:
                del self.database_desc["Query"]
            what = self.table
        else:
            what = self.sql = self.sqltext.toPlainText()
            self.table = "Custom SQL"
            if self.materialize:
                import psycopg2
                if not self.materialize_table_name:
                    self.Error.connection(
                        "Specify a table name to materialize the query")
                    return
                try:
                    with self.backend.execute_sql_query(
                            "DROP TABLE IF EXISTS " +
                            self.materialize_table_name):
                        pass
                    with self.backend.execute_sql_query(
                            "CREATE TABLE " + self.materialize_table_name +
                            " AS " + self.sql):
                        pass
                    with self.backend.execute_sql_query(
                            "ANALYZE " + self.materialize_table_name):
                        pass
                except (psycopg2.ProgrammingError, BackendError) as ex:
                    self.Error.connection(str(ex))
                    return

        try:
            table = SqlTable(dict(host=self.host,
                                  port=self.port,
                                  database=self.database,
                                  user=self.username,
                                  password=self.password),
                             what,
                             backend=type(self.backend),
                             inspect_values=False)
        except BackendError as ex:
            self.Error.connection(str(ex))
            return

        self.Error.connection.clear()

        sample = False

        if table.approx_len() > LARGE_TABLE and self.guess_values:
            confirm = QMessageBox(self)
            confirm.setIcon(QMessageBox.Warning)
            confirm.setText("Attribute discovery might take "
                            "a long time on large tables.\n"
                            "Do you want to auto discover attributes?")
            confirm.addButton("Yes", QMessageBox.YesRole)
            no_button = confirm.addButton("No", QMessageBox.NoRole)
            sample_button = confirm.addButton("Yes, on a sample",
                                              QMessageBox.YesRole)
            confirm.exec()
            if confirm.clickedButton() == no_button:
                self.guess_values = False
            elif confirm.clickedButton() == sample_button:
                sample = True

        self.Information.clear()
        if self.guess_values:
            QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
            if sample:
                s = table.sample_time(1)
                domain = s.get_domain(inspect_values=True)
                self.Information.data_sampled()
            else:
                domain = table.get_domain(inspect_values=True)
            QApplication.restoreOverrideCursor()
            table.domain = domain

        if self.download:
            if table.approx_len() > MAX_DL_LIMIT:
                QMessageBox.warning(
                    self, 'Warning',
                    "Data is too big to download.\n"
                    "Consider using the Data Sampler widget to download "
                    "a sample instead.")
                self.download = False
            elif table.approx_len() > AUTO_DL_LIMIT:
                confirm = QMessageBox.question(
                    self, 'Question',
                    "Data appears to be big. Do you really "
                    "want to download it to local memory?",
                    QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                if confirm == QMessageBox.No:
                    self.download = False

        if self.download:
            table.download_data(MAX_DL_LIMIT)
            table = Table(table)

        return table

    def send_report(self):
        if not self.database_desc:
            self.report_paragraph("No database connection.")
            return
        self.report_items("Database", self.database_desc)
        if self.data_desc_table:
            self.report_items("Data",
                              report.describe_data(self.data_desc_table))

    @classmethod
    def migrate_settings(cls, settings, version):
        if version < 2:
            # Until Orange version 3.4.4 username and password had been
            # stored in Settings.
            cm = cls._credential_manager(settings["host"], settings["port"])
            cm.username = settings["username"]
            cm.password = settings["password"]


if __name__ == "__main__":
    a = QApplication(sys.argv)
    ow = OWSql()
    ow.show()
    a.exec_()
    ow.saveSettings()
PypiClean
/MTGA-0.9.5-py3-none-any.whl/mtga/set_data/eld.py
import sys
from mtga.models.card import Card
from mtga.models.card_set import Set
import inspect


AllThatGlitters = Card(name="all_that_glitters", pretty_name="All That Glitters", cost=['1', 'W'], color_identity=['W'],
    card_type="Enchantment", sub_types="Aura", abilities=[1027, 136335], set_id="ELD", rarity="Uncommon", collectible=True, set_number=2, mtga_id=70149)
ShiningArmor = Card(name="shining_armor", pretty_name="Shining Armor", cost=['1', 'W'], color_identity=['W'],
    card_type="Artifact", sub_types="Equipment", abilities=[7, 136241, 7610, 1156], set_id="ELD", rarity="Common", collectible=True, set_number=29, mtga_id=70176)
VenerableKnight = Card(name="venerable_knight", pretty_name="Venerable Knight", cost=['W'], color_identity=['W'],
    card_type="Creature", sub_types="Human Knight", abilities=[136079], set_id="ELD", rarity="Uncommon", collectible=True, set_number=35, mtga_id=70182)
AnimatingFaerie = Card(name="animating_faerie", pretty_name="Animating Faerie", cost=['2', 'U'], color_identity=['U'],
    card_type="Creature", sub_types="Faerie", abilities=[8], set_id="ELD", rarity="Uncommon", collectible=True, set_number=38, mtga_id=70185)
CorridorMonitor = Card(name="corridor_monitor", pretty_name="Corridor Monitor", cost=['1', 'U'], color_identity=['U'],
    card_type="Artifact Creature", sub_types="Construct", abilities=[136084], set_id="ELD", rarity="Common", collectible=True, set_number=41, mtga_id=70188)
FaerieVandal = Card(name="faerie_vandal", pretty_name="Faerie Vandal", cost=['1', 'U'], color_identity=['U'],
    card_type="Creature", sub_types="Faerie Rogue", abilities=[7, 8, 136108], set_id="ELD", rarity="Uncommon", collectible=True, set_number=45, mtga_id=70192)
Frogify = Card(name="frogify", pretty_name="Frogify", cost=['1', 'U'], color_identity=['U'],
    card_type="Enchantment", sub_types="Aura", abilities=[1027, 136125], set_id="ELD", rarity="Uncommon", collectible=True, set_number=47, mtga_id=70194)
RunAwayTogether = Card(name="run_away_together", pretty_name="Run Away Together", cost=['1', 'U'], color_identity=['U'],
    card_type="Instant", sub_types="", abilities=[136218], set_id="ELD", rarity="Common", collectible=True, set_number=62, mtga_id=70209)
WitchingWell = Card(name="witching_well", pretty_name="Witching Well", cost=['U'], color_identity=['U'],
    card_type="Artifact", sub_types="", abilities=[100685, 136240], set_id="ELD", rarity="Common", collectible=True, set_number=74, mtga_id=70221)
BakeintoaPie = Card(name="bake_into_a_pie", pretty_name="Bake into a Pie", cost=['2', 'B', 'B'], color_identity=['B'],
    card_type="Instant", sub_types="", abilities=[136246], set_id="ELD", rarity="Common", collectible=True, set_number=76, mtga_id=70223)
BelleoftheBrawl = Card(name="belle_of_the_brawl", pretty_name="Belle of the Brawl", cost=['2', 'B'], color_identity=['B'],
    card_type="Creature", sub_types="Human Knight", abilities=[142, 136248], set_id="ELD", rarity="Uncommon", collectible=True, set_number=78, mtga_id=70225)
FoulmireKnight = Card(name="foulmire_knight", pretty_name="Foulmire Knight", cost=['B'], color_identity=['B'],
    card_type="Creature", sub_types="Zombie Knight", abilities=[1], set_id="ELD", rarity="Uncommon", collectible=True, set_number=90, mtga_id=70237)
OrderofMidnight = Card(name="order_of_midnight", pretty_name="Order of Midnight", cost=['1', 'B'], color_identity=['B'],
    card_type="Creature", sub_types="Human Knight", abilities=[8, 86476], set_id="ELD", rarity="Uncommon", collectible=True, set_number=99, mtga_id=70246)
SmittenSwordmaster = Card(name="smitten_swordmaster", pretty_name="Smitten Swordmaster", cost=['1', 'B'], color_identity=['B'],
    card_type="Creature", sub_types="Human Knight", abilities=[12], set_id="ELD", rarity="Common", collectible=True, set_number=105, mtga_id=70252)
SyrKonradtheGrim = Card(name="syr_konrad_the_grim", pretty_name="Syr Konrad, the Grim", cost=['3', 'B', 'B'], color_identity=['B'],
    card_type="Creature", sub_types="Human Knight", abilities=[136076, 136077], set_id="ELD", rarity="Uncommon", collectible=True, set_number=107, mtga_id=70254)
CrystalSlipper = Card(name="crystal_slipper", pretty_name="Crystal Slipper", cost=['1', 'R'], color_identity=['R'],
    card_type="Artifact", sub_types="Equipment", abilities=[136299, 1268], set_id="ELD", rarity="Common", collectible=True, set_number=119, mtga_id=70266)
EmberethShieldbreaker = Card(name="embereth_shieldbreaker", pretty_name="Embereth Shieldbreaker", cost=['1', 'R'], color_identity=['R'],
    card_type="Creature", sub_types="Human Knight", abilities=[], set_id="ELD", rarity="Uncommon", collectible=True, set_number=122, mtga_id=70269)
BeanstalkGiant = Card(name="beanstalk_giant", pretty_name="Beanstalk Giant", cost=['6', 'G'], color_identity=['G'],
    card_type="Creature", sub_types="Giant", abilities=[88259], set_id="ELD", rarity="Uncommon", collectible=True, set_number=149, mtga_id=70296)
KeeperofFables = Card(name="keeper_of_fables", pretty_name="Keeper of Fables", cost=['3', 'G', 'G'], color_identity=['G'],
    card_type="Creature", sub_types="Cat", abilities=[136258], set_id="ELD", rarity="Uncommon", collectible=True, set_number=163, mtga_id=70310)
RosethornAcolyte = Card(name="rosethorn_acolyte", pretty_name="Rosethorn Acolyte", cost=['2', 'G'], color_identity=['G'],
    card_type="Creature", sub_types="Elf Druid", abilities=[1055], set_id="ELD", rarity="Common", collectible=True, set_number=174, mtga_id=70321)
GarrukCursedHuntsman = Card(name="garruk_cursed_huntsman", pretty_name="Garruk, Cursed Huntsman", cost=['4', 'B', 'G'], color_identity=['B', 'G'],
    card_type="Planeswalker", sub_types="Garruk", abilities=[136126, 104880, 136128], set_id="ELD", rarity="Mythic Rare", collectible=True, set_number=191, mtga_id=70338)
InspiringVeteran = Card(name="inspiring_veteran", pretty_name="Inspiring Veteran", cost=['R', 'W'], color_identity=['R', 'W'],
    card_type="Creature", sub_types="Human Knight", abilities=[119104], set_id="ELD", rarity="Uncommon", collectible=True, set_number=194, mtga_id=70341)
MaraleafPixie = Card(name="maraleaf_pixie", pretty_name="Maraleaf Pixie", cost=['G', 'U'], color_identity=['G', 'U'],
    card_type="Creature", sub_types="Faerie", abilities=[8, 18504], set_id="ELD", rarity="Uncommon", collectible=True, set_number=196, mtga_id=70343)
SavvyHunter = Card(name="savvy_hunter", pretty_name="Savvy Hunter", cost=['1', 'B', 'G'], color_identity=['B', 'G'],
    card_type="Creature", sub_types="Human Warrior", abilities=[136251, 136147], set_id="ELD", rarity="Uncommon", collectible=True, set_number=200, mtga_id=70347)
Shinechaser = Card(name="shinechaser", pretty_name="Shinechaser", cost=['1', 'W', 'U'], color_identity=['W', 'U'],
    card_type="Creature", sub_types="Faerie", abilities=[8, 15, 103355, 136148], set_id="ELD", rarity="Uncommon", collectible=True, set_number=201, mtga_id=70348)
SteelclawLance = Card(name="steelclaw_lance", pretty_name="Steelclaw Lance", cost=['B', 'R'], color_identity=['B', 'R'],
    card_type="Artifact", sub_types="Equipment", abilities=[2512, 136149, 1156], set_id="ELD", rarity="Uncommon", collectible=True, set_number=202, mtga_id=70349)
WintermoorCommander = Card(name="wintermoor_commander", pretty_name="Wintermoor Commander", cost=['W', 'B'], color_identity=['W', 'B'],
    card_type="Creature", sub_types="Human Knight", abilities=[1, 136151, 136152], set_id="ELD", rarity="Uncommon", collectible=True, set_number=205, mtga_id=70352)
ArcanistsOwl = Card(name="arcanists_owl", pretty_name="Arcanist's Owl", cost=['(W/U)', '(W/U)', '(W/U)', '(W/U)'], color_identity=['W', 'U'],
    card_type="Artifact Creature", sub_types="Bird", abilities=[8, 136153], set_id="ELD", rarity="Uncommon", collectible=True, set_number=206, mtga_id=70353)
FirebornKnight = Card(name="fireborn_knight", pretty_name="Fireborn Knight", cost=['(R/W)', '(R/W)', '(R/W)', '(R/W)'], color_identity=['R', 'W'],
    card_type="Creature", sub_types="Human Knight", abilities=[3, 136269], set_id="ELD", rarity="Uncommon", collectible=True, set_number=210, mtga_id=70357)
GoldenEgg = Card(name="golden_egg", pretty_name="Golden Egg", cost=['2'], color_identity=[],
    card_type="Artifact", sub_types="Food", abilities=[86788, 88207, 136278], set_id="ELD", rarity="Common", collectible=True, set_number=220, mtga_id=70367)
HeraldicBanner = Card(name="heraldic_banner", pretty_name="Heraldic Banner", cost=['3'], color_identity=[],
    card_type="Artifact", sub_types="", abilities=[88237, 136279, 2374], set_id="ELD", rarity="Uncommon", collectible=True, set_number=222, mtga_id=70369)
ShamblingSuit = Card(name="shambling_suit", pretty_name="Shambling Suit", cost=['3'], color_identity=[],
    card_type="Artifact Creature", sub_types="Construct", abilities=[1316], set_id="ELD", rarity="Uncommon", collectible=True, set_number=230, mtga_id=70377)
WitchsOven = Card(name="witchs_oven", pretty_name="Witch's Oven", cost=['1'], color_identity=[],
    card_type="Artifact", sub_types="", abilities=[136178], set_id="ELD", rarity="Uncommon", collectible=True, set_number=237, mtga_id=70384)
TournamentGrounds = Card(name="tournament_grounds", pretty_name="Tournament Grounds", cost=[], color_identity=['W', 'B', 'R'],
    card_type="Land", sub_types="", abilities=[1152, 136198], set_id="ELD", rarity="Uncommon", collectible=True, set_number=248, mtga_id=70395)
Plains = Card(name="plains", pretty_name="Plains", cost=[], color_identity=['W'],
    card_type="Land", sub_types="Plains", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=250, mtga_id=70397)
Plains2 = Card(name="plains", pretty_name="Plains", cost=[], color_identity=['W'],
    card_type="Land", sub_types="Plains", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=251, mtga_id=70398)
Plains3 = Card(name="plains", pretty_name="Plains", cost=[], color_identity=['W'],
    card_type="Land", sub_types="Plains", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=252, mtga_id=70399)
Plains4 = Card(name="plains", pretty_name="Plains", cost=[], color_identity=['W'],
    card_type="Land", sub_types="Plains", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=253, mtga_id=70400)
Island = Card(name="island", pretty_name="Island", cost=[], color_identity=['U'],
    card_type="Land", sub_types="Island", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=254, mtga_id=70401)
Island2 = Card(name="island", pretty_name="Island", cost=[], color_identity=['U'],
    card_type="Land", sub_types="Island", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=255, mtga_id=70402)
Island3 = Card(name="island", pretty_name="Island", cost=[], color_identity=['U'],
    card_type="Land", sub_types="Island", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=256, mtga_id=70403)
Island4 = Card(name="island", pretty_name="Island", cost=[], color_identity=['U'],
    card_type="Land", sub_types="Island", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=257, mtga_id=70404)
Swamp = Card(name="swamp", pretty_name="Swamp", cost=[], color_identity=['B'],
    card_type="Land", sub_types="Swamp", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=258, mtga_id=70405)
Swamp2 = Card(name="swamp", pretty_name="Swamp", cost=[], color_identity=['B'],
    card_type="Land", sub_types="Swamp", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=259, mtga_id=70406)
Swamp3 = Card(name="swamp", pretty_name="Swamp", cost=[], color_identity=['B'],
    card_type="Land", sub_types="Swamp", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=260, mtga_id=70407)
Swamp4 = Card(name="swamp", pretty_name="Swamp", cost=[], color_identity=['B'],
    card_type="Land", sub_types="Swamp", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=261, mtga_id=70408)
Mountain = Card(name="mountain", pretty_name="Mountain", cost=[], color_identity=['R'],
    card_type="Land", sub_types="Mountain", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=262, mtga_id=70409)
Mountain2 = Card(name="mountain", pretty_name="Mountain", cost=[], color_identity=['R'],
    card_type="Land", sub_types="Mountain", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=263, mtga_id=70410)
Mountain3 = Card(name="mountain", pretty_name="Mountain", cost=[], color_identity=['R'],
    card_type="Land", sub_types="Mountain", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=264, mtga_id=70411)
Mountain4 = Card(name="mountain", pretty_name="Mountain", cost=[], color_identity=['R'],
    card_type="Land", sub_types="Mountain", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=265, mtga_id=70412)
Forest = Card(name="forest", pretty_name="Forest", cost=[], color_identity=['G'],
    card_type="Land", sub_types="Forest", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=266, mtga_id=70413)
Forest2 = Card(name="forest", pretty_name="Forest", cost=[], color_identity=['G'],
    card_type="Land", sub_types="Forest", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=267, mtga_id=70414)
Forest3 = Card(name="forest", pretty_name="Forest", cost=[], color_identity=['G'],
    card_type="Land", sub_types="Forest", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=268, mtga_id=70415)
Forest4 = Card(name="forest", pretty_name="Forest", cost=[], color_identity=['G'],
    card_type="Land", sub_types="Forest", abilities=[], set_id="ELD", rarity="Basic", collectible=True, set_number=269, mtga_id=70416)
WindScarredCrag = Card(name="windscarred_crag", pretty_name="Wind-Scarred Crag", cost=[], color_identity=['R', 'W'],
    card_type="Land", sub_types="", abilities=[76735, 90050, 4247], set_id="ELD", rarity="Common", collectible=True, set_number=308, mtga_id=70421)
ThornwoodFalls = Card(name="thornwood_falls", pretty_name="Thornwood Falls", cost=[], color_identity=['G', 'U'],
    card_type="Land", sub_types="", abilities=[76735, 90050, 18504], set_id="ELD", rarity="Common", collectible=True, set_number=313, mtga_id=70426)
Goat = Card(name="goat", pretty_name="Goat", cost=[], color_identity=[],
    card_type="Creature", sub_types="Goat", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10001, mtga_id=70427)
Human = Card(name="human", pretty_name="Human", cost=[], color_identity=[],
    card_type="Creature", sub_types="Human", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10002, mtga_id=70428)
Knight = Card(name="knight", pretty_name="Knight", cost=[], color_identity=[],
    card_type="Creature", sub_types="Knight", abilities=[15], set_id="ELD", rarity="Token", collectible=False, set_number=10003, mtga_id=70429)
Mouse = Card(name="mouse", pretty_name="Mouse", cost=[], color_identity=[],
    card_type="Creature", sub_types="PlaceholderSubType2", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10004, mtga_id=70430)
Faerie = Card(name="faerie", pretty_name="Faerie", cost=[], color_identity=[],
    card_type="Creature", sub_types="Faerie", abilities=[8], set_id="ELD", rarity="Token", collectible=False, set_number=10005, mtga_id=70431)
Rat = Card(name="rat", pretty_name="Rat", cost=[], color_identity=[],
    card_type="Creature", sub_types="Rat", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10006, mtga_id=70432)
Dwarf = Card(name="dwarf", pretty_name="Dwarf", cost=[], color_identity=[],
    card_type="Creature", sub_types="Dwarf", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10007, mtga_id=70433)
Bear = Card(name="bear", pretty_name="Bear", cost=[], color_identity=[],
    card_type="Creature", sub_types="Bear", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10008, mtga_id=70434)
Boar = Card(name="boar", pretty_name="Boar", cost=[], color_identity=[],
    card_type="Creature", sub_types="Boar", abilities=[136165], set_id="ELD", rarity="Token", collectible=False, set_number=10009, mtga_id=70435)
Giant = Card(name="giant", pretty_name="Giant", cost=[], color_identity=[],
    card_type="Creature", sub_types="Giant", abilities=[], set_id="ELD", rarity="Token", collectible=False, set_number=10010, mtga_id=70436)
HumanCleric = Card(name="human_cleric", pretty_name="Human Cleric", cost=[], color_identity=[],
    card_type="Creature", sub_types="Human Cleric", abilities=[12, 9], set_id="ELD", rarity="Token", collectible=False, set_number=10011, mtga_id=70437)
HumanRogue = Card(name="human_rogue", pretty_name="Human Rogue", cost=[], color_identity=[],
    card_type="Creature", sub_types="Human Rogue", abilities=[9, 136242], set_id="ELD", rarity="Token", collectible=False, set_number=10012, mtga_id=70438)
HumanWarrior = Card(name="human_warrior", pretty_name="Human Warrior", cost=[], color_identity=[],
    card_type="Creature", sub_types="Human Warrior", abilities=[14, 9], set_id="ELD", rarity="Token", collectible=False, set_number=10013, mtga_id=70439)
Wolf = Card(name="wolf", pretty_name="Wolf", cost=[], color_identity=[],
    card_type="Creature", sub_types="Wolf", abilities=[136216], set_id="ELD", rarity="Token", collectible=False, set_number=10014, mtga_id=70440)
Food = Card(name="food", pretty_name="Food", cost=[], color_identity=[],
    card_type="Artifact", sub_types="Food", abilities=[197], set_id="ELD", rarity="Token", collectible=False, set_number=10015, mtga_id=70441)
Food2 = Card(name="food", pretty_name="Food", cost=[], color_identity=[],
    card_type="Artifact", sub_types="Food", abilities=[197], set_id="ELD", rarity="Token", collectible=False, set_number=10016, mtga_id=70442)
Food3 = Card(name="food", pretty_name="Food", cost=[], color_identity=[],
    card_type="Artifact", sub_types="Food", abilities=[197], set_id="ELD", rarity="Token", collectible=False, set_number=10017, mtga_id=70443)
Food4 = Card(name="food", pretty_name="Food", cost=[], color_identity=[],
    card_type="Artifact", sub_types="Food", abilities=[197], set_id="ELD", rarity="Token", collectible=False, set_number=10018, mtga_id=70444)
MaceoftheValiant = Card(name="mace_of_the_valiant", pretty_name="Mace of the Valiant", cost=['2', 'W'], color_identity=['W'],
    card_type="Artifact", sub_types="Equipment", abilities=[136349, 136350, 1156], set_id="ELD", rarity="Rare", collectible=True, set_number=314, mtga_id=70447)
FaerieFormation = Card(name="faerie_formation", pretty_name="Faerie Formation", cost=['4', 'U'], color_identity=['U'],
    card_type="Creature", sub_types="Faerie", abilities=[8, 136368], set_id="ELD", rarity="Rare", collectible=True, set_number=316, mtga_id=70449)
ShimmerDragon = Card(name="shimmer_dragon", pretty_name="Shimmer Dragon", cost=['4', 'U', 'U'], color_identity=['U'],
    card_type="Creature", sub_types="Dragon", abilities=[8, 136352, 136364], set_id="ELD", rarity="Rare", collectible=True, set_number=317, mtga_id=70450)
WorkshopElders = Card(name="workshop_elders", pretty_name="Workshop Elders", cost=['6', 'U'], color_identity=['U'],
    card_type="Creature", sub_types="Human Artificer", abilities=[136353, 136366], set_id="ELD", rarity="Rare", collectible=True, set_number=318, mtga_id=70451)
TasteofDeath = Card(name="taste_of_death", pretty_name="Taste of Death", cost=['4', 'B', 'B'], color_identity=['B'],
    card_type="Sorcery", sub_types="", abilities=[136355], set_id="ELD", rarity="Rare", collectible=True, set_number=320, mtga_id=70453)
SteelbaneHydra = Card(name="steelbane_hydra", pretty_name="Steelbane Hydra", cost=['X', 'G', 'G'], color_identity=['G'],
    card_type="Creature", sub_types="Turtle Hydra", abilities=[76885, 136369], set_id="ELD", rarity="Rare", collectible=True, set_number=322, mtga_id=70455)
ThornMammoth = Card(name="thorn_mammoth", pretty_name="Thorn Mammoth", cost=['5', 'G', 'G'], color_identity=['G'],
    card_type="Creature", sub_types="Elephant", abilities=[14, 136357], set_id="ELD", rarity="Rare", collectible=True, set_number=323, mtga_id=70456)
AlelaArtfulProvocateur = Card(name="alela_artful_provocateur", pretty_name="Alela, Artful Provocateur", cost=['1', 'W', 'U', 'B'], color_identity=['W', 'U', 'B'],
    card_type="Creature", sub_types="Faerie Warlock", abilities=[8, 1, 12, 121999, 136371], set_id="ELD", rarity="Mythic Rare", collectible=True, set_number=324, mtga_id=70457)
BanishintoFable = Card(name="banish_into_fable", pretty_name="Banish into Fable", cost=['4', 'W', 'U'], color_identity=['W', 'U'],
    card_type="Instant", sub_types="", abilities=[1389, 136359], set_id="ELD", rarity="Rare", collectible=True, set_number=325, mtga_id=70458)
ChulaneTellerofTales = Card(name="chulane_teller_of_tales", pretty_name="Chulane, Teller of Tales", cost=['2', 'G', 'W', 'U'], color_identity=['W', 'U', 'G'],
    card_type="Creature", sub_types="Human Druid", abilities=[15, 136373, 1392], set_id="ELD", rarity="Mythic Rare", collectible=True, set_number=326, mtga_id=70459)
KnightsCharge = Card(name="knights_charge", pretty_name="Knights' Charge", cost=['1', 'W', 'B'], color_identity=['W', 'B'],
    card_type="Enchantment", sub_types="", abilities=[1395, 136361], set_id="ELD", rarity="Rare", collectible=True, set_number=328, mtga_id=70461)
KorvoldFaeCursedKing = Card(name="korvold_faecursed_king", pretty_name="Korvold, Fae-Cursed King", cost=['2', 'B', 'R', 'G'], color_identity=['B', 'R', 'G'],
    card_type="Creature", sub_types="Dragon Noble", abilities=[8, 136365, 1398], set_id="ELD", rarity="Mythic Rare", collectible=True, set_number=329, mtga_id=70462)
SyrGwynHeroofAshvale = Card(name="syr_gwyn_hero_of_ashvale", pretty_name="Syr Gwyn, Hero of Ashvale", cost=['3', 'R', 'W', 'B'], color_identity=['W', 'B', 'R'],
    card_type="Creature", sub_types="Human Knight", abilities=[15, 142, 1399, 1400], set_id="ELD", rarity="Mythic Rare", collectible=True, set_number=330, mtga_id=70463)
ArcaneSignet = Card(name="arcane_signet", pretty_name="Arcane Signet", cost=['2'], color_identity=[],
    card_type="Artifact", sub_types="", abilities=[90126], set_id="ELD", rarity="Common", collectible=True, set_number=331, mtga_id=70464)
TomeofLegends = Card(name="tome_of_legends", pretty_name="Tome of Legends", cost=['2'], color_identity=[],
    card_type="Artifact", sub_types="", abilities=[136362, 136372, 136363], set_id="ELD", rarity="Rare", collectible=True, set_number=332, mtga_id=70465)
CommandTower = Card(name="command_tower", pretty_name="Command Tower", cost=[], color_identity=[],
    card_type="Land", sub_types="", abilities=[90126], set_id="ELD", rarity="Common", collectible=True, set_number=333, mtga_id=70466)
BringtoLife = Card(name="bring_to_life", pretty_name="Bring to Life", cost=['2', 'U'], color_identity=['U'],
    card_type="Sorcery", sub_types="Adventure", abilities=[136488], set_id="ELD", rarity="Uncommon", collectible=False, set_number=38, mtga_id=70477)
ProfaneInsight = Card(name="profane_insight", pretty_name="Profane Insight", cost=['2', 'B'], color_identity=['B'],
    card_type="Instant", sub_types="Adventure", abilities=[1416], set_id="ELD", rarity="Uncommon", collectible=False, set_number=90, mtga_id=70483)
AlterFate = Card(name="alter_fate", pretty_name="Alter Fate", cost=['1', 'B'], color_identity=['B'],
    card_type="Sorcery", sub_types="Adventure", abilities=[24122], set_id="ELD", rarity="Uncommon", collectible=False, set_number=99, mtga_id=70485)
CurryFavor = Card(name="curry_favor", pretty_name="Curry Favor", cost=['B'], color_identity=['B'],
    card_type="Sorcery", sub_types="Adventure", abilities=[136490], set_id="ELD", rarity="Common", collectible=False, set_number=105, mtga_id=70487)
BattleDisplay = Card(name="battle_display", pretty_name="Battle Display", cost=['R'], color_identity=['R'],
    card_type="Sorcery", sub_types="Adventure", abilities=[22564], set_id="ELD", rarity="Uncommon", collectible=False, set_number=122, mtga_id=70489)
FertileFootsteps = Card(name="fertile_footsteps", pretty_name="Fertile Footsteps", cost=['2', 'G'], color_identity=['G'],
    card_type="Sorcery", sub_types="Adventure", abilities=[5296], set_id="ELD", rarity="Uncommon", collectible=False, set_number=149, mtga_id=70492)
SeasonalRitual = Card(name="seasonal_ritual", pretty_name="Seasonal Ritual", cost=['G'], color_identity=['G'],
    card_type="Sorcery", sub_types="Adventure", abilities=[1429], set_id="ELD", rarity="Common", collectible=False, set_number=174, mtga_id=70497)


clsmembers = [card for name, card in inspect.getmembers(sys.modules[__name__]) if isinstance(card, Card)]
ThroneOfEldraine = Set("ThroneOfEldraine", cards=clsmembers)

set_ability_map = {
    1: 'Deathtouch',
    3: 'Double strike',
    7: 'Flash',
    8: 'Flying',
    9: 'Haste',
    12: 'Lifelink',
    14: 'Trample',
    15: 'Vigilance',
    142: 'Menace',
    197: '{o2}, {oT}, Sacrifice this artifact: You gain 3 life.',
    1027: 'Enchant creature',
    1055: '{oT}: Add one mana of any color.',
    1152: '{oT}: Add {oC}.',
    1156: 'Equip {o3}',
    1268: 'Equip {o1}',
    1316: "Shambling Suit's power is equal to the number of artifacts and/or enchantments you control.",
    1389: 'When you cast this spell from your hand, copy it if you control an artifact, then copy it if you control an '
          'enchantment. You may choose new targets for the copies.',
    1392: "{o3}, {oT}: Return target creature you control to its owner's hand.",
    1395: 'Whenever a Knight you control attacks, each opponent loses 1 life and you gain 1 life.',
    1398: 'Whenever you sacrifice a permanent, put a +1/+1 counter on Korvold and draw a card.',
    1399: 'Whenever an equipped creature you control attacks, you draw a card and you lose 1 life.',
    1400: 'Equipment you control have equip Knight {o0}.',
    1416: 'You draw a card and you lose 1 life.',
    1429: 'Add one mana of any color.',
    2374: '{oT}: Add one mana of the chosen color.',
    2512: 'Equipped creature gets +2/+2.',
    4247: '{oT}: Add {oR} or {oW}.',
    5296: 'Search your library for a basic land card, put it onto the battlefield, then shuffle your library.',
    7610: 'Equipped creature gets +0/+2 and has vigilance.',
    18504: '{oT}: Add {oG} or {oU}.',
    22564: 'Destroy target artifact.',
    24122: 'Return target creature card from your graveyard to your hand.',
    76735: 'Thornwood Falls enters the battlefield tapped.',
    76885: 'Steelbane Hydra enters the battlefield with X +1/+1 counters on it.',
    86476: "Order of Midnight can't block.",
    86788: 'When Golden Egg enters the battlefield, draw a card.',
    88207: '{o1}, {oT}, Sacrifice Golden Egg: Add one mana of any color.',
    88237: 'As Heraldic Banner enters the battlefield, choose a color.',
    88259: "Beanstalk Giant's power and toughness are each equal to the number of lands you control.",
    90050: 'When Thornwood Falls enters the battlefield, you gain 1 life.',
    90126: "{oT}: Add one mana of any color in your commander's color identity.",
    100685: 'When Witching Well enters the battlefield, scry 2.',
    103355: 'Shinechaser gets +1/+1 as long as you control an artifact.',
    104880: '-3: Destroy target creature. Draw a card.',
    119104: 'Other Knights you control get +1/+1.',
    121999: 'Other creatures you control with flying get +1/+0.',
    136076: 'Whenever another creature dies, or a creature card is put into a graveyard from anywhere other than the '
            'battlefield, or a creature card leaves your graveyard, Syr Konrad, the Grim deals 1 damage to each opponent.',
    136077: '{o1oB}: Each player puts the top card of their library into their graveyard.',
    136079: 'When Venerable Knight dies, put a +1/+1 counter on target Knight you control.',
    136084: 'When Corridor Monitor enters the battlefield, untap target artifact or creature you control.',
    136108: 'Whenever you draw your second card each turn, put a +1/+1 counter on Faerie Vandal.',
    136125: 'Enchanted creature loses all abilities and is a blue Frog creature with base power and toughness 1/1. \n'
            '<i>(It loses all other card types and creature types.)</i>',
    136126: '0: Create two 2/2 black and green Wolf creature tokens with "When this creature dies, put a loyalty counter '
            'on each Garruk you control."',
    136128: '-6: You get an emblem with "Creatures you control get +3/+3 and have trample."',
    136147: 'Sacrifice two Foods: Draw a card.',
    136148: 'Shinechaser gets +1/+1 as long as you control an enchantment.',
    136149: 'Equip Knight {o1}',
    136151: "Wintermoor Commander's toughness is equal to the number of Knights you control.",
    136152: 'Whenever Wintermoor Commander attacks, another target Knight you control gains indestructible until end of turn.',
    136153: "When Arcanist's Owl enters the battlefield, look at the top four cards of your library. You may reveal an "
            'artifact or enchantment card from among them and put it into your hand. Put the rest on the bottom of your '
            'library in a random order.',
    136165: 'When this creature dies, create a Food token.',
    136178: "{oT}, Sacrifice a creature: Create a Food token. If the sacrificed creature's toughness was 4 or greater, "
            'create two Food tokens instead.',
    136198: '{oT}: Add {oR}, {oW}, or {oB}. Spend this mana only to cast a Knight or Equipment spell.',
    136216: 'When this creature dies, put a loyalty counter on each Garruk you control.',
    136218: 'Choose two target creatures controlled by different players. Return those creatures to their owners\' hands.',
    136240: '{o3oU}, Sacrifice Witching Well: Draw two cards.',
    136241: 'When Shining Armor enters the battlefield, attach it to target Knight you control.',
    136242: 'When this creature enters the battlefield, it deals 1 damage to any target.',
    136246: 'Destroy target creature. Create a Food token.',
    136248: 'Whenever Belle of the Brawl attacks, other Knights you control get +1/+0 until end of turn.',
    136251: 'Whenever Savvy Hunter attacks or blocks, create a Food token.',
    136258: 'Whenever one or more non-Human creatures you control deal combat damage to a player, draw a card.',
    136269: '{o(R/W)o(R/W)o(R/W)o(R/W)}: Fireborn Knight gets +1/+1 until end of turn.',
    136278: '{o2}, {oT}, Sacrifice Golden Egg: You gain 3 life.',
    136279: 'Creatures you control of the chosen color get +1/+0.',
    136299: 'Equipped creature gets +1/+0 and has haste.',
    136335: 'Enchanted creature gets +1/+1 for each artifact and/or enchantment you control.',
    136349: 'Equipped creature gets +1/+1 for each charge counter on Mace of the Valiant and has vigilance.',
    136350: 'Whenever a creature enters the battlefield under your control, put a charge counter on Mace of the Valiant.',
    136352: 'As long as you control four or more artifacts, Shimmer Dragon has hexproof.',
    136353: 'Artifact creatures you control have flying.',
    136355: 'Each player sacrifices three creatures. You create three Food tokens.',
    136357: 'Whenever Thorn Mammoth or another creature enters the battlefield under your control, Thorn Mammoth fights '
            "up to one target creature you don't control.",
    136359: "Return target nonland permanent to its owner's hand. You create a 2/2 white Knight creature token with vigilance.",
    136361: "{o6oWoB}, Sacrifice Knights' Charge: Return all Knight creature cards from your graveyard to the battlefield.",
    136362: 'Tome of Legends enters the battlefield with a page counter on it.',
    136363: '{o1}, {oT}, Remove a page counter from Tome of Legends: Draw a card.',
    136364: 'Tap two untapped artifacts you control: Draw a card.',
    136365: 'Whenever Korvold, Fae-Cursed King enters the battlefield or attacks, sacrifice another permanent.',
    136366: 'At the beginning of combat on your turn, you may have target noncreature artifact you control become a 0/0 '
            'artifact creature. If you do, put four +1/+1 counters on it.',
    136368: '{o3oU}: Create a 1/1 blue Faerie creature token with flying. 
Draw a ' 'card.', 136369: '{o2oG}, Remove a +1/+1 counter from Steelbane Hydra: Destroy target ' 'artifact or enchantment.', 136371: 'Whenever you cast an artifact or enchantment spell, create a 1/1 ' 'blue Faerie creature token with flying.', 136372: 'Whenever your commander enters the battlefield or attacks, put a ' 'page counter on Tome of Legends.', 136373: 'Whenever you cast a creature spell, draw a card, then you may put a ' 'land card from your hand onto the battlefield.', 136488: 'Target noncreature artifact you control becomes a 0/0 artifact ' 'creature. Put four +1/+1 counters on it.', 136490: 'You gain X life and each opponent loses X life, where X is the ' 'number of Knights you control.'}
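# Minimal illustrative sketch (not part of the card data itself): the module
# pairs each Card's numeric ability IDs with human-readable rules text in
# `set_ability_map`, so a card's abilities can be resolved like this. It
# assumes only names defined above and that Card exposes its constructor
# keyword arguments (`pretty_name`, `abilities`) as attributes.
def describe_card(card):
    """Return a card's pretty name together with its resolved ability texts."""
    texts = [set_ability_map.get(ability_id, "<unmapped ability {}>".format(ability_id))
             for ability_id in card.abilities]
    return "{}: {}".format(card.pretty_name, "; ".join(texts))

# Example: describe_card(SyrGwynHeroofAshvale) would yield
# "Syr Gwyn, Hero of Ashvale: Vigilance; Menace; Whenever an equipped creature
# you control attacks, you draw a card and you lose 1 life.; Equipment you
# control have equip Knight {o0}."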
PypiClean
/15five-django-ajax-selects-1.5.2.155.tar.gz/15five-django-ajax-selects-1.5.2.155/CHANGELOG.md
# Change Log ## [1.5.2](https://github.com/crucialfelix/django-ajax-selects/tree/1.5.2) (2016-10-19) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.5.1...1.5.2) **Fixed bugs:** - Occasionally: $.ui.autocomplete is undefined [\#188](https://github.com/crucialfelix/django-ajax-selects/issues/188) **Closed issues:** - No cache management headers in HTTP response [\#187](https://github.com/crucialfelix/django-ajax-selects/issues/187) ## [1.5.1](https://github.com/crucialfelix/django-ajax-selects/tree/1.5.1) (2016-10-13) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.5.0...1.5.1) **Implemented enhancements:** - Prefer document.createElement to document.write [\#182](https://github.com/crucialfelix/django-ajax-selects/issues/182) **Fixed bugs:** - fix: add related for multiple select [\#184](https://github.com/crucialfelix/django-ajax-selects/pull/184) ([crucialfelix](https://github.com/crucialfelix)) ## [1.5.0](https://github.com/crucialfelix/django-ajax-selects/tree/1.5.0) (2016-09-05) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.4.3...1.5.0) - Added Support for Django 1.10 - Dropped Django 1.5 **Fixed bugs:** - Initial fields are duplicated when new row added. [\#94](https://github.com/crucialfelix/django-ajax-selects/issues/94) **Closed issues:** - ValueError in Django 1.10 [\#177](https://github.com/crucialfelix/django-ajax-selects/issues/177) - Django 1.10 did add popup [\#174](https://github.com/crucialfelix/django-ajax-selects/issues/174) - Example not Working [\#161](https://github.com/crucialfelix/django-ajax-selects/issues/161) **Merged pull requests:** - Fix documentation to format code properly [\#165](https://github.com/crucialfelix/django-ajax-selects/pull/165) ([joshblum](https://github.com/joshblum)) - install.sh not working [\#162](https://github.com/crucialfelix/django-ajax-selects/pull/162) ([hdzierz](https://github.com/hdzierz)) ## [1.4.3](https://github.com/crucialfelix/django-ajax-selects/tree/1.4.3) (2016-03-13) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.4.2...1.4.3) **Closed issues:** - Additional stacked inlines clear un-saved autocomplete fields [\#156](https://github.com/crucialfelix/django-ajax-selects/issues/156) - support request: ManyToOneRel doesn't have expected attributes [\#154](https://github.com/crucialfelix/django-ajax-selects/issues/154) **Merged pull requests:** - Stop using deprecated \_meta api. [\#160](https://github.com/crucialfelix/django-ajax-selects/pull/160) ([kramarz](https://github.com/kramarz)) - Fixed file name in documentation for custom templates. [\#158](https://github.com/crucialfelix/django-ajax-selects/pull/158) ([sebslomski](https://github.com/sebslomski)) - Fixes re-initialization upon adding inlines [\#157](https://github.com/crucialfelix/django-ajax-selects/pull/157) ([funkyfuture](https://github.com/funkyfuture)) ## [1.4.2](https://github.com/crucialfelix/django-ajax-selects/tree/1.4.2) (2016-01-18) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.4.1...1.4.2) **Fixed bugs:** - Selected data lost when adding new rows via ajax [\#145](https://github.com/crucialfelix/django-ajax-selects/issues/145) - Inline forms raise TypeError when not filled in [\#142](https://github.com/crucialfelix/django-ajax-selects/issues/142) **Merged pull requests:** - Fix incorrect has\_changed result for AutoCompleteSelectField that has not been filled in. 
[\#152](https://github.com/crucialfelix/django-ajax-selects/pull/152) ([unklphil](https://github.com/unklphil)) - Only trigger reset\(\) initially if data hasn't changed. [\#146](https://github.com/crucialfelix/django-ajax-selects/pull/146) ([jmfederico](https://github.com/jmfederico)) ## [1.4.1](https://github.com/crucialfelix/django-ajax-selects/tree/1.4.1) (2015-11-18) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.4.0...1.4.1) **Closed issues:** - Templates not included in pypi1.4.0 package [\#141](https://github.com/crucialfelix/django-ajax-selects/issues/141) - Documentation seems to be broken on RTD [\#140](https://github.com/crucialfelix/django-ajax-selects/issues/140) ## [1.4.0](https://github.com/crucialfelix/django-ajax-selects/tree/1.4.0) (2015-11-07) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.6...1.4.0) **Implemented enhancements:** - Pass `request` to `LookupChannel` methods, make overriding easier [\#40](https://github.com/crucialfelix/django-ajax-selects/issues/40) **Fixed bugs:** - AttributeError on invalid form data [\#135](https://github.com/crucialfelix/django-ajax-selects/issues/135) - Doesn't work with readonly\_fields [\#120](https://github.com/crucialfelix/django-ajax-selects/issues/120) - Add another popup doesn't add `?\_popup=1` to url in Django 1.8 [\#118](https://github.com/crucialfelix/django-ajax-selects/issues/118) - Field appers duplicated when marked as readonly in admin [\#84](https://github.com/crucialfelix/django-ajax-selects/issues/84) **Closed issues:** - can't import register module [\#139](https://github.com/crucialfelix/django-ajax-selects/issues/139) - How to fire lookup for value in text field using javascript [\#137](https://github.com/crucialfelix/django-ajax-selects/issues/137) - tests not included in MANIFEST.in [\#136](https://github.com/crucialfelix/django-ajax-selects/issues/136) - Content of input not included in field, only dropdown choices for make\_ajax\_field [\#134](https://github.com/crucialfelix/django-ajax-selects/issues/134) - documentation for add link on ajax fields for django admin inlines [\#127](https://github.com/crucialfelix/django-ajax-selects/issues/127) - Can't specify widget for AutoCompleteSelectMultipleField [\#126](https://github.com/crucialfelix/django-ajax-selects/issues/126) - RemovedInDjango19Warning in ajax\_select [\#125](https://github.com/crucialfelix/django-ajax-selects/issues/125) - Django's form change\_data always include autocomplete fields [\#123](https://github.com/crucialfelix/django-ajax-selects/issues/123) - AttributeError: 'int' object has no attribute 'isnumeric' [\#117](https://github.com/crucialfelix/django-ajax-selects/issues/117) - Error with TheForm in Django 1.8 [\#115](https://github.com/crucialfelix/django-ajax-selects/issues/115) - Not Secure. invalid literal for long\(\) with base 10 [\#114](https://github.com/crucialfelix/django-ajax-selects/issues/114) - ImportError: No module named ajax\_select [\#112](https://github.com/crucialfelix/django-ajax-selects/issues/112) - 'AutoCompleteSelectWidget' object has no attribute 'choices' [\#111](https://github.com/crucialfelix/django-ajax-selects/issues/111) - "Uncaught TypeError: Cannot read property 'autocomplete' of undefined" [\#107](https://github.com/crucialfelix/django-ajax-selects/issues/107) - Regression? 
Or UUID PK not supported [\#103](https://github.com/crucialfelix/django-ajax-selects/issues/103) - Support lookup channels from third-party apps [\#98](https://github.com/crucialfelix/django-ajax-selects/issues/98) - callbacks for select doesn't work [\#97](https://github.com/crucialfelix/django-ajax-selects/issues/97) - DeprecationWarning: Creating a ModelForm without either the 'fields' attribute or the 'exclude' attribute is deprecated [\#96](https://github.com/crucialfelix/django-ajax-selects/issues/96) - AutoCompleteSelectField has no attribute 'limit\_choices\_to' in Django 1.7 [\#83](https://github.com/crucialfelix/django-ajax-selects/issues/83) - Custom form [\#81](https://github.com/crucialfelix/django-ajax-selects/issues/81) - avoid warning when installing via pip [\#53](https://github.com/crucialfelix/django-ajax-selects/issues/53) - search\_fields like in ModelAdmin [\#21](https://github.com/crucialfelix/django-ajax-selects/issues/21) - Issues when using django-admin-sortable [\#12](https://github.com/crucialfelix/django-ajax-selects/issues/12) **Merged pull requests:** - Get rid of terrible `\_as\_pk` function \(fixes \#117, \#120, and \#135\) [\#138](https://github.com/crucialfelix/django-ajax-selects/pull/138) ([hwkns](https://github.com/hwkns)) - Reset button handling [\#132](https://github.com/crucialfelix/django-ajax-selects/pull/132) ([jmerdich](https://github.com/jmerdich)) - Remove unnecessary backquotes in README.md [\#131](https://github.com/crucialfelix/django-ajax-selects/pull/131) ([zablotski](https://github.com/zablotski)) - Feature autodiscover [\#129](https://github.com/crucialfelix/django-ajax-selects/pull/129) ([morr0350](https://github.com/morr0350)) - Example for get\_formset on inline admin [\#128](https://github.com/crucialfelix/django-ajax-selects/pull/128) ([rlskoeser](https://github.com/rlskoeser)) - ajax\_lookup should respond with content type `application/json` [\#119](https://github.com/crucialfelix/django-ajax-selects/pull/119) ([unklphil](https://github.com/unklphil)) - Add AjaxSelectAdminStackedInline to work similarly to AjaxSelectAdminTabularInline [\#89](https://github.com/crucialfelix/django-ajax-selects/pull/89) ([unklphil](https://github.com/unklphil)) ## [1.3.6](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.6) (2015-04-06) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.5...1.3.6) **Closed issues:** - 'AutoCompleteSelectWidget' object has no attribute 'choices' [\#110](https://github.com/crucialfelix/django-ajax-selects/issues/110) - \_\_init\_\_\(\) got an unexpected keyword argument 'mimetype' [\#108](https://github.com/crucialfelix/django-ajax-selects/issues/108) - Limit number of results returned by lookup and auto load additional results when user scrolls to bottom of list [\#105](https://github.com/crucialfelix/django-ajax-selects/issues/105) - Support reverse relationships [\#99](https://github.com/crucialfelix/django-ajax-selects/issues/99) - 'set' object does not support indexing [\#93](https://github.com/crucialfelix/django-ajax-selects/issues/93) - deck area [\#92](https://github.com/crucialfelix/django-ajax-selects/issues/92) - Inline won't work with new lines \(SOLVED\) [\#87](https://github.com/crucialfelix/django-ajax-selects/issues/87) - Bug in ajax\_selects.js \(addKiller function call\) [\#79](https://github.com/crucialfelix/django-ajax-selects/issues/79) - AutoCompleteSelectField breaks when using localization and long ids 
[\#68](https://github.com/crucialfelix/django-ajax-selects/issues/68) - format\_match did not work with django-ajax-select 1.3.3 [\#58](https://github.com/crucialfelix/django-ajax-selects/issues/58) - Support Non-integer Primary Keys \(mongodb etc\) [\#34](https://github.com/crucialfelix/django-ajax-selects/issues/34) - non operation with mongodb [\#3](https://github.com/crucialfelix/django-ajax-selects/issues/3) **Merged pull requests:** - Change order for running script by .sh \#112 \(NOTICE\) [\#113](https://github.com/crucialfelix/django-ajax-selects/pull/113) ([skrzypek](https://github.com/skrzypek)) - Update README.md [\#101](https://github.com/crucialfelix/django-ajax-selects/pull/101) ([cormier](https://github.com/cormier)) - Added option for fields in TheForm superclass [\#91](https://github.com/crucialfelix/django-ajax-selects/pull/91) ([onyekaa](https://github.com/onyekaa)) ## [1.3.5](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.5) (2014-08-02) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.4...1.3.5) **Closed issues:** - ajax-selects/fields.py simplejson is deprecated [\#74](https://github.com/crucialfelix/django-ajax-selects/issues/74) - Document the use in template for 'quick installation' [\#71](https://github.com/crucialfelix/django-ajax-selects/issues/71) - Document how to use an ajax field in a ListFilter in admin [\#70](https://github.com/crucialfelix/django-ajax-selects/issues/70) - Issue with Ajax-Search on Media-Fields [\#60](https://github.com/crucialfelix/django-ajax-selects/issues/60) - Set width of jquery autocomplete widget [\#30](https://github.com/crucialfelix/django-ajax-selects/issues/30) **Merged pull requests:** - Fix issue 58 and pull request 76 [\#85](https://github.com/crucialfelix/django-ajax-selects/pull/85) ([camillobruni](https://github.com/camillobruni)) - Django's HttpResponse object has deprecated the mimetype kwarg in 1.7 [\#82](https://github.com/crucialfelix/django-ajax-selects/pull/82) ([squidsoup](https://github.com/squidsoup)) - Support non-int primary keys [\#78](https://github.com/crucialfelix/django-ajax-selects/pull/78) ([AlexHill](https://github.com/AlexHill)) - correct import deprecated since Django 1.4 [\#77](https://github.com/crucialfelix/django-ajax-selects/pull/77) ([gertingold](https://github.com/gertingold)) - maintain compatibility with Python 2.6 [\#75](https://github.com/crucialfelix/django-ajax-selects/pull/75) ([gertingold](https://github.com/gertingold)) ## [1.3.4](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.4) (2014-03-30) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.3...1.3.4) **Closed issues:** - Custom validation with django-ajax-selects [\#73](https://github.com/crucialfelix/django-ajax-selects/issues/73) - DeprecationWarning django.utils.simplejson [\#63](https://github.com/crucialfelix/django-ajax-selects/issues/63) - When create select list always show \(in bottom list\) add new object. 
[\#62](https://github.com/crucialfelix/django-ajax-selects/issues/62) **Merged pull requests:** - Trivial typo fix \(chanel\_name\) [\#69](https://github.com/crucialfelix/django-ajax-selects/pull/69) ([gthb](https://github.com/gthb)) - Fixes \#18 - AJAX Selector and dynamic inlines [\#67](https://github.com/crucialfelix/django-ajax-selects/pull/67) ([peterfarrell](https://github.com/peterfarrell)) - Using json as opposed to simplejson \(depreciated\) [\#65](https://github.com/crucialfelix/django-ajax-selects/pull/65) ([krzysztof](https://github.com/krzysztof)) ## [1.3.3](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.3) (2013-11-13) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.2...1.3.3) **Merged pull requests:** - Remove protocol from dynamically loaded urls. [\#54](https://github.com/crucialfelix/django-ajax-selects/pull/54) ([jellonek](https://github.com/jellonek)) ## [1.3.2](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.2) (2013-11-09) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.1...1.3.2) ## [1.3.1](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.1) (2013-10-09) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.3.0...1.3.1) **Closed issues:** - parameters to triggers [\#43](https://github.com/crucialfelix/django-ajax-selects/issues/43) - django.conf.urls.defaults depreciated [\#38](https://github.com/crucialfelix/django-ajax-selects/issues/38) - How do you pass a class name for the addKiller [\#37](https://github.com/crucialfelix/django-ajax-selects/issues/37) - AutoComplete and AutoCompleteSelect renders fine but AutoCompleteMultipleSelect isnt working [\#31](https://github.com/crucialfelix/django-ajax-selects/issues/31) - django inline formset [\#18](https://github.com/crucialfelix/django-ajax-selects/issues/18) ## [1.3.0](https://github.com/crucialfelix/django-ajax-selects/tree/1.3.0) (2013-10-08) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.2.5...1.3.0) **Closed issues:** - ContentNotRenderedError [\#39](https://github.com/crucialfelix/django-ajax-selects/issues/39) - Please add a change trigger to the target. 
[\#35](https://github.com/crucialfelix/django-ajax-selects/issues/35) - can\_add isn't working in lookups [\#23](https://github.com/crucialfelix/django-ajax-selects/issues/23) **Merged pull requests:** - Follow the Meta definition of the original modelform [\#49](https://github.com/crucialfelix/django-ajax-selects/pull/49) ([artscoop](https://github.com/artscoop)) ## [1.2.5](https://github.com/crucialfelix/django-ajax-selects/tree/1.2.5) (2012-08-22) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.2.4...1.2.5) **Closed issues:** - dj1.4 Error importing template source loader django.template.loaders.filesystem.load\_template\_source: [\#15](https://github.com/crucialfelix/django-ajax-selects/issues/15) - fixed bug: AutoCompleteSelectMultipleField does not honor 'widget' parameter [\#14](https://github.com/crucialfelix/django-ajax-selects/issues/14) - error 'this.data\("autocomplete"\) is undefined' [\#10](https://github.com/crucialfelix/django-ajax-selects/issues/10) - Fire the change event on selection [\#8](https://github.com/crucialfelix/django-ajax-selects/issues/8) - ValueError: translation table must be 256 characters long [\#5](https://github.com/crucialfelix/django-ajax-selects/issues/5) - Error on Pop-Up [\#19](https://github.com/crucialfelix/django-ajax-selects/issues/19) **Merged pull requests:** - Small fix in CSS [\#2](https://github.com/crucialfelix/django-ajax-selects/pull/2) ([karlmoritz](https://github.com/karlmoritz)) ## [1.2.4](https://github.com/crucialfelix/django-ajax-selects/tree/1.2.4) (2012-01-15) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.2.3...1.2.4) ## [1.2.3](https://github.com/crucialfelix/django-ajax-selects/tree/1.2.3) (2011-11-29) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.2.1...1.2.3) ## [1.2.1](https://github.com/crucialfelix/django-ajax-selects/tree/1.2.1) (2011-10-19) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.1.5...1.2.1) ## [1.1.5](https://github.com/crucialfelix/django-ajax-selects/tree/1.1.5) (2011-08-24) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.1.3...1.1.5) ## [1.1.3](https://github.com/crucialfelix/django-ajax-selects/tree/1.1.3) (2010-06-06) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.1.1...1.1.3) ## [1.1.1](https://github.com/crucialfelix/django-ajax-selects/tree/1.1.1) (2010-06-03) [Full Changelog](https://github.com/crucialfelix/django-ajax-selects/compare/1.1.0...1.1.1) ## [1.1.0](https://github.com/crucialfelix/django-ajax-selects/tree/1.1.0) (2010-03-06) \* *This Change Log was automatically generated by [github_changelog_generator](https://github.com/skywinder/Github-Changelog-Generator)*
PypiClean
/Flask-Multi-Redis-0.1.5.tar.gz/Flask-Multi-Redis-0.1.5/LICENSE.rst
GNU Affero General Public License
=================================

*Version 3, 19 November 2007*

*Copyright © 2007 Free Software Foundation, Inc.* <http://www.fsf.org>

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Preamble
--------

The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.

The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains
free software for all its users.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.

Developers that use our General Public Licenses protect your rights
with two steps: **(1)** assert copyright on the software, and **(2)** offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.

A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.

The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.

An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing
under this license.

The precise terms and conditions for copying, distribution and
modification follow.

TERMS AND CONDITIONS
--------------------

0. Definitions
~~~~~~~~~~~~~~

"This License" refers to version 3 of the GNU Affero General Public
License.

"Copyright" also means copyright-like laws that apply to other kinds
of works, such as semiconductor masks.

"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.

To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of
an exact copy. The resulting work is called a "modified version" of
the earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that **(1)** displays an appropriate copyright notice, and **(2)** tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code ~~~~~~~~~~~~~~ The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that **(a)** is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and **(b)** serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions ~~~~~~~~~~~~~~~~~~~~ All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. 
This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.

You may make, run and propagate covered works that you do not convey,
without conditions so long as your license otherwise remains in force.
You may convey covered works to others for the sole purpose of having
them make modifications exclusively for you, or provide you with
facilities for running those works, provided that you comply with the
terms of this License in conveying all material for which you do not
control copyright. Those thus making or running the covered works for
you must do so exclusively on your behalf, under your direction and
control, on terms that prohibit them from making any copies of your
copyrighted material outside their relationship with you.

Conveying under any other circumstances is permitted solely under the
conditions stated below. Sublicensing is not allowed; section 10 makes
it unnecessary.

3. Protecting Users' Legal Rights From Anti-Circumvention Law
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.

When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such
circumvention is effected by exercising rights under this License with
respect to the covered work, and you disclaim any intention to limit
operation or modification of the work as a means of enforcing, against
the work's users, your or third parties' legal rights to forbid
circumvention of technological measures.

4. Conveying Verbatim Copies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.

You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

5. Conveying Modified Source Versions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these
conditions:

* **a)** The work must carry prominent notices stating that you modified
  it, and giving a relevant date.
* **b)** The work must carry prominent notices stating that it is
  released under this License and any conditions added under section 7.
  This requirement modifies the requirement in section 4 to
  "keep intact all notices".
* **c)** You must license the entire work, as a whole, under this
  License to anyone who comes into possession of a copy. This
  License will therefore apply, along with any applicable section 7
  additional terms, to the whole of the work, and all its parts,
  regardless of how they are packaged.
This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. * **d)** If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: * **a)** Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. * **b)** Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either **(1)** a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or **(2)** access to copy the Corresponding Source from a network server at no charge. * **c)** Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. * **d)** Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. * **e)** Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. 
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either **(1)** a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or **(2)** anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms ~~~~~~~~~~~~~~~~~~~ "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: * **a)** Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or * **b)** Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or * **c)** Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or * **d)** Limiting the use for publicity purposes of names of licensors or authors of the material; or * **e)** Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or * **f)** Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination ~~~~~~~~~~~~~~ You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated **(a)** provisionally, unless and until the copyright holder explicitly and finally terminates your license, and **(b)** permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. 
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents ~~~~~~~~~~~ A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. 
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either **(1)** cause the Corresponding Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the patent license for this particular work, or **(3)** arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license **(a)** in connection with copies of the covered work conveyed by you (or copies made from those copies), or **(b)** primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. 
For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Remote Network Interaction; Use with the GNU General Public License ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Notwithstanding any other provision of this License, if you modify the Program, your modified version must prominently offer all users interacting with it remotely through a computer network (if your version supports such interaction) an opportunity to receive the Corresponding Source of your version by providing access to the Corresponding Source from a network server at no charge, through some standard or customary means of facilitating copying of software. This Corresponding Source shall include the Corresponding Source for any work covered by version 3 of the GNU General Public License that is incorporated pursuant to the following paragraph. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the work with which it is combined will remain governed by version 3 of the GNU General Public License. 14. Revised Versions of this License ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Free Software Foundation may publish revised and/or new versions of the GNU Affero General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU Affero General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU Affero General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU Affero General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty ~~~~~~~~~~~~~~~~~~~~~~~~~~ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
Limitation of Liability ~~~~~~~~~~~~~~~~~~~~~~~~~~~ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. *END OF TERMS AND CONDITIONS* How to Apply These Terms to Your New Programs --------------------------------------------- If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. | <one line to give the program's name and a brief idea of what it does.> | Copyright (C) <year> <name of author> | | This program is free software: you can redistribute it and/or modify | it under the terms of the GNU Affero General Public License as published by | the Free Software Foundation, either version 3 of the License, or | (at your option) any later version. | | This program is distributed in the hope that it will be useful, | but WITHOUT ANY WARRANTY; without even the implied warranty of | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU Affero General Public License for more details. | | You should have received a copy of the GNU Affero General Public License | along with this program. If not, see <http://www.gnu.org/licenses/>. Also add information on how to contact you by electronic and paper mail. If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see <http://www.gnu.org/licenses/>.
PypiClean
/GRImpulsiveWaves-0.3.2-py3-none-any.whl/grimpulsivewaves/plotting/dynamic.py
import numpy as np import plotly import plotly.graph_objects as go from plotly.io import write_image import random class PlotlyDynamicPlotter: def __init__(self, labels=["x", "y", "z"], title="", aspectratio=None, xrange=None, yrange=None, zrange=None, showSpikes=True, spikeColor="#000000", bgcolor="#fff", fontsize=10, ticks=True, tick_fontsize=12): self.labels = labels axis = dict(showline=True, linewidth=4, title=dict(font=dict(size=20)), showticklabels=ticks, tickfont=dict(size=tick_fontsize), backgroundcolor=bgcolor) layout = go.Layout(scene= dict( xaxis_title=labels[0], yaxis_title=labels[1], zaxis_title=labels[2], xaxis=axis, yaxis=axis, zaxis=axis), font=dict( size=fontsize) ) self.fig = go.Figure(layout=layout) if xrange: self.fig.update_layout(scene=dict( xaxis=dict(range=xrange, showbackground=True) )) if yrange: self.fig.update_layout(scene=dict( yaxis=dict(range=yrange, showbackground=True) )) if zrange: self.fig.update_layout(scene=dict( zaxis=dict(range=zrange, showbackground=True) )) if showSpikes: self.fig.update_layout(scene=dict( xaxis=dict(spikecolor=spikeColor, spikesides=False, spikethickness=2), yaxis=dict(spikecolor=spikeColor, spikesides=False, spikethickness=2), zaxis=dict(spikecolor=spikeColor, spikesides=False, spikethickness=2) )) else: self.fig.update_layout(scene=dict( xaxis=dict(showspikes=False), yaxis=dict(showspikes=False), zaxis=dict(showspikes=False) )) self.fig.update_layout(title=dict(text=title, font=dict(size=35), x=0.5, xanchor="center", y=0.8, yanchor="top")) if aspectratio: self.fig.update_layout(scene_aspectmode='manual', scene_aspectratio=dict(x=aspectratio[0], y=aspectratio[1], z=aspectratio[2])) def plotTrajectory3D(self, trajectory, color="#{:06x}".format(random.randint(0, 0xFFFFFF)).upper(), xc=1, yc=2, zc=3, name="", t=None, showlegend=False, opacity=1, linewidth=2, dash='solid'): hinfo = "%{fullData.name}<br><br>" + self.labels[0] + ": %{x}<br>" + self.labels[1] + ": %{y}<br>" + self.labels[2] + ": %{z}<br><br>" + r"tau: %{text}<extra></extra>" xs = np.array([x[xc] for x in trajectory]).flatten() ys = np.array([x[yc] for x in trajectory]).flatten() zs = np.array([x[zc] for x in trajectory]).flatten() self.fig.add_scatter3d(x=xs, y=ys, z=zs, mode="lines", line=go.scatter3d.Line(color=color, width=linewidth, dash=dash), name=name, hoverinfo='all', opacity=opacity) if t is not None: self.fig['data'][-1].update(text=t) self.fig['data'][-1].update(hovertemplate=hinfo) self.fig['data'][-1].update(showlegend=showlegend) def plotHyperboloid(self, l=1, vsize=(-2,2), opacity=0.5, plot_edges=False, color="rgb(" + str(random.randint(50,100)) + "," + str(random.randint(50,100)) + "," + str(random.randint(50,100)) + ")", drawImpulse=False, showlegend=False, drawCoords=False): """ Generate hyperboloid :param l: Cosmological constant """ import plotly.figure_factory as ff #I hope Python is clever! 
from scipy.spatial import Delaunay eps = np.sign(l) a = np.sqrt(3/np.abs(l)) u = np.linspace(0, 2 * np.pi, 90) v = np.linspace(vsize[0], vsize[1], 90) #TODO: resolution should not be hard-coded u, v = np.meshgrid(u, v) u = u.flatten() v = v.flatten() if(eps > 0): x = a * np.cosh(v/a) * np.cos(u) y = a * np.cosh(v/a) * np.sin(u) z = a * np.sinh(v/a) else: x = a * np.cosh(v/a) * np.cos(u) z = a * np.cosh(v/a) * np.sin(u) y = a * np.sinh(v/a) points2D = np.vstack([u, v]).T tri = Delaunay(points2D) simplices = tri.simplices _tempfig = ff.create_trisurf(x=x, y=y, z=z, simplices=simplices, show_colorbar=False, colormap=color, plot_edges=plot_edges, aspectratio=dict(x=1, y=1, z=0.8)) _tempfig['data'][0].update(opacity=opacity) _tempfig['data'][0].update(hoverinfo='skip') _tempfig['data'][0].update(hoverinfo='skip') _tempfig['data'][0].update(name="Hyperboloid") _tempfig['data'][0].update(showlegend=showlegend) if drawImpulse: v = np.linspace(vsize[0], vsize[1], 10) z = v[:-1] y = v[:-1] x = 10 * [-a] x2 = 10 * [a] _tempfig.add_scatter3d(x=x, y=y, z=z, mode="lines", line=go.scatter3d.Line(color="black", width=4), name="U = infinity", hoverinfo='skip', showlegend=showlegend, opacity=0.4) _tempfig.add_scatter3d(x=x2, y=y, z=z, mode="lines", line=go.scatter3d.Line(color="black", width=8), name="U = 0", hoverinfo='skip', showlegend=showlegend, opacity=0.8) if drawCoords: b = a + 0.005 bm = a - 0.005 u = np.linspace(0, 2 * np.pi, 12) ud = np.linspace(0, 2 * np.pi, 120) v = np.linspace(vsize[0], vsize[1], 12) vd = np.linspace(vsize[0], vsize[1], 120) #u const for u0 in u: if (eps > 0): x = b * np.cosh(vd / b) * np.cos(u0) xm = bm * np.cosh(vd / bm) * np.cos(u0) y = b * np.cosh(vd / b) * np.sin(u0) ym = bm * np.cosh(vd / bm) * np.sin(u0) z = b * np.sinh(vd / b) zm = bm * np.sinh(vd / bm) else: x = b * np.cosh(vd / b) * np.cos(u0) xm = bm * np.cosh(vd / bm) * np.cos(u0) y = b * np.sinh(vd / b) ym = bm * np.sinh(vd / bm) z = b * np.cosh(vd / b) * np.sin(u0) zm = bm * np.cosh(vd / bm) * np.sin(u0) _tempfig.add_scatter3d(x=x, y=y, z=z, mode="lines", line=go.scatter3d.Line(color="black", width=2), showlegend=False, opacity=1, name="", hoverinfo='skip') _tempfig.add_scatter3d(x=xm, y=ym, z=zm, mode="lines", line=go.scatter3d.Line(color="black", width=2), showlegend=False, opacity=1, name="", hoverinfo='skip') #v const for v0 in v: if (eps > 0): x = b * np.cosh(v0 / b) * np.cos(ud) y = b * np.cosh(v0 / b) * np.sin(ud) z = b * np.sinh(v0 / b) * np.ones_like(ud) xm = bm * np.cosh(v0 / bm) * np.cos(ud) ym = bm * np.cosh(v0 / bm) * np.sin(ud) zm = bm * np.sinh(v0 / bm) * np.ones_like(ud) else: x = b * np.cosh(v0 / b) * np.cos(ud) y = b * np.sinh(v0 / b) * np.ones_like(ud) z = b * np.cosh(v0 / b) * np.sin(ud) xm = bm * np.cosh(v0 / bm) * np.cos(ud) ym = bm * np.sinh(v0 / bm) * np.ones_like(ud) zm = bm * np.cosh(v0 / bm) * np.sin(ud) _tempfig.add_scatter3d(x=x, y=y, z=z, mode="lines", line=go.scatter3d.Line(color="black", width=2), showlegend=False, opacity=1, name="", hoverinfo='skip') _tempfig.add_scatter3d(x=xm, y=ym, z=zm, mode="lines", line=go.scatter3d.Line(color="black", width=2), showlegend=False, opacity=1, name="", hoverinfo='skip') self.fig.add_traces(_tempfig.data) def plotCutAndPasteHyperboloid(self, H, l, vsize=(-2, 2), opacity=0.5, plot_edges=False, color="rgb(" + str(random.randint(50,100)) + "," + str(random.randint(50,100)) + "," + str(random.randint(50,100)) + ")", drawImpulse=False, showlegend=False, drawOnlyMinus=False): from scipy.spatial import Delaunay a = np.sqrt(3 / np.abs(l)) eps = 
np.sign(l) uo = np.linspace(0, 2 * np.pi, 650) v = np.linspace(vsize[0], vsize[1], 550) # TODO: resolution should not be hard-coded uo, v = np.meshgrid(uo, v) u = uo.flatten() v = v.flatten() # TODO: fix this; try https://stackoverflow.com/questions/25060103/determine-sum-of-numpy-array-while-excluding-certain-values if (eps > 0): x = a * np.cosh(v / a) * np.cos(u) # Z1 z = a * np.sinh(v / a) # Z0 y = a * np.cosh(v / a) * np.sin(u) # Z4 else: x = a * np.cosh(v/a) * np.cos(u) z = a * np.cosh(v / a) * np.sin(u) y = a * np.sinh(v / a) xm = np.array([a if c - b <= 0 else np.nan for a, b, c in zip(x, y, z)]).reshape(uo.shape) ym = np.array([b if c - b <= 0 else np.nan for a, b, c in zip(x, y, z)]).reshape(uo.shape) zm = np.array([c if c - b <= 0 else np.nan for a, b, c in zip(x, y, z)]).reshape(uo.shape) if not drawOnlyMinus: xp = np.array([a if c - b >= 0 else np.nan for a, b, c in zip(x, y, z)]).reshape(uo.shape) yp = np.array([b - 1. / np.sqrt(2) * H if c - b >= 0 else np.nan for a, b, c in zip(x, y, z)]).reshape(uo.shape) zp = np.array([c + 1. / np.sqrt(2) * H if c - b >= 0 else np.nan for a, b, c in zip(x, y, z)]).reshape(uo.shape) _tempfig = go.Figure(data=([ go.Surface(x=xm, y=ym, z=zm, colorscale=[color, color]) ] if drawOnlyMinus else [ go.Surface(x=xp, y=yp, z=zp, colorscale=[color, color]), go.Surface(x=xm, y=ym, z=zm, colorscale=[color, color]) ])) if not drawOnlyMinus: _tempfig['data'][0].update(name="Hyperboloid +") _tempfig['data'][1].update(name="Hyperboloid -") else: _tempfig['data'][0].update(name="Hyperboloid -") _tempfig.update_traces(showscale=False, opacity=opacity, hoverinfo='skip', showlegend=showlegend) if drawImpulse: v = np.linspace(vsize[0], vsize[1], 10) z = v[:-1] y = v[:-1] x = 10 * [-a] x2 = 10 * [a] _tempfig.add_scatter3d(x=x, y=y, z=z, mode="lines", line=go.scatter3d.Line(color="black", width=4), name="U- = infinity", hoverinfo='skip', showlegend=showlegend, opacity=0.4) _tempfig.add_scatter3d(x=x2, y=y, z=z, mode="lines", line=go.scatter3d.Line(color="black", width=8), name="U- = 0", hoverinfo='skip', showlegend=showlegend, opacity=0.8) if not drawOnlyMinus: _tempfig.add_scatter3d(x=x, y=y - 1. / np.sqrt(2) * H, z=z + 1. / np.sqrt(2) * H, mode="lines", line=go.scatter3d.Line(color="black", width=4), name="U+ = infinity", hoverinfo='skip', showlegend=showlegend, opacity=0.4) _tempfig.add_scatter3d(x=x2, y=y - 1. / np.sqrt(2) * H, z=z + 1. / np.sqrt(2) * H, mode="lines", line=go.scatter3d.Line(color="black", width=8), name="U+ = 0", hoverinfo='skip', showlegend=showlegend, opacity=0.8) self.fig.add_traces(_tempfig.data) def plotSurface(self, f, *args, xdomain=[-1, 1], ydomain=[-1, 1], xstep=0.05, ystep=0.05, color="rgb(" + str(random.randint(50,100)) + "," + str(random.randint(50,100)) + "," + str(random.randint(50,100)) + ")", color2=None, complexNull=False, name="Surface", showlegend=False): x = np.arange(xdomain[0], xdomain[1], xstep) y = np.arange(ydomain[0], ydomain[1], ystep) #X, Y = np.meshgrid(x, y) z = np.zeros((x.size, y.size)) for i in range(x.size): for j in range(y.size): if complexNull: z[i, j] = f(x[i] + 1j * y[j], x[i] - 1j * y[j], args) else: z[i, j] = f(x[i], y[j], args) if np.isnan(z[i, j]): # a == comparison against NaN is always False; np.isnan catches it z[i, j] = 0 if color2 is None: color2 = color _tempfig = go.Figure(data=[ go.Surface(x=x, y=y, z=z, colorscale=[color, color2], name=name, surfacecolor=np.sqrt(np.abs(z)))]) _tempfig.update_traces(showscale=False, showlegend=showlegend) self.fig.add_traces(_tempfig.data) def show(self): self.fig.show() def export_html(self, path, include_plotlyjs=True, include_mathjax=False): if include_mathjax: include_mathjax = 'cdn' self.fig.write_html(path, include_plotlyjs=include_plotlyjs, include_mathjax=include_mathjax) #TODO: Do something with default camera def export_pdf(self, path, eye=(1.25, 1.25, 1.25), up=(.0, .0, 1.0), orbit=False, title=True): """ This requires Kaleido, install using "pip install -U Kaleido". :param path: Path of resulting file :param eye: Eye of camera :return: """ camera = dict( eye=dict(x=eye[0], y=eye[1], z=eye[2]), up=dict(x=up[0], y=up[1], z=up[2]) ) if orbit: self.fig.update_layout(scene = dict(dragmode='orbit')) self.fig.update_layout(scene_camera=camera, showlegend=False) write_image(self.fig, path, format="pdf", scale=3, engine="kaleido", width=1024, height=1024) self.fig.update_layout(scene_camera=dict(eye=dict(x=1.25, y=1.25, z=1.25), up=dict(x=.0,y=.0,z=1.0)), showlegend=True, scene=dict(dragmode='turntable'))
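A minimal usage sketch for the plotter class above; the helical trajectory data is made up for illustration, and any iterable of 4-component points works, since plotTrajectory3D selects components via xc/yc/zc:

import numpy as np

# Hypothetical worldline: 4-component points [t, x, y, z]; the default
# component indices xc=1, yc=2, zc=3 pick out the spatial part for plotting.
tau = np.linspace(0, 4 * np.pi, 200)
trajectory = [np.array([t, np.cos(t), np.sin(t), 0.1 * t]) for t in tau]

plotter = PlotlyDynamicPlotter(labels=["x", "y", "z"], title="Demo helix")
plotter.plotTrajectory3D(trajectory, color="#ff0000", name="helix", t=tau,
                         showlegend=True)
plotter.show()  # or plotter.export_html("helix.html")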
PypiClean
/Flask-Perm-0.2.8.tar.gz/Flask-Perm-0.2.8/flask_perm/app.py
import logging from functools import wraps from flask import session, request from sqlalchemy.exc import IntegrityError from .core import db, bcrypt class Perm(object): class Denied(Exception): pass def __init__(self, app=None): self.app = app self.user_callback = None self.current_user_callback = None self.users_callback = None self.users_count_callback = None self._admin_logger = logging.getLogger('flask_perm.admin') self.registered_permissions = set() if app is not None: self.init_app(app) @property def admin_logger(self): return self._admin_logger @admin_logger.setter def admin_logger(self, logger): self._admin_logger = logger def init_app(self, app): """Initialize the Perm object. """ if not hasattr(app, 'extensions'): app.extensions = {} app.extensions['perm'] = self db.app = app db.init_app(app) bcrypt.app = app bcrypt.init_app(app) app.config.setdefault('PERM_ADMIN_PREFIX', '/perm-admin') app.config.setdefault('PERM_ADMIN_ECHO', False) from . import models db.create_all() from .api import bp as api_bp app.register_blueprint(api_bp, url_prefix=app.config.get('PERM_ADMIN_PREFIX') + '/api') from .admin import bp as admin_bp app.register_blueprint(admin_bp, url_prefix=app.config.get('PERM_ADMIN_PREFIX')) self.register_context_processors(app) def log_admin_action(self, msg): '''Log msg to the `flask_perm.admin` logger.''' if self.app.config.get('PERM_ADMIN_ECHO'): self.admin_logger.info(msg) def require_perm_admin(self, f): '''A decorator that protects a function from unauthorized requests. Used in the perm admin dashboard.''' @wraps(f) def _(*args, **kwargs): if not session.get('perm_admin_id'): raise self.Denied return f(*args, **kwargs) return _ def create_super_admin(self, email, password): """Create superadmin / Reset password.""" from .services import SuperAdminService try: superadmin = SuperAdminService.create(email, password) except IntegrityError: superadmin = SuperAdminService.get_by_email(email) superadmin = SuperAdminService.reset_password(superadmin.id, password) return SuperAdminService.to_dict(superadmin) def login_perm_admin(self, super_admin_id): """Get authorization to access perm admin dashboard.""" session['perm_admin_id'] = super_admin_id def logout_perm_admin(self): """Revoke authorization from accessing perm admin dashboard.""" session.pop('perm_admin_id', None) def get_perm_admin_id_from_session(self): from .services import SuperAdminService admin_id = session.get('perm_admin_id') super_admin = admin_id and SuperAdminService.get(admin_id) return super_admin and super_admin.id def get_perm_admin_id_by_auth(self, email, password): from .services import SuperAdminService if SuperAdminService.verify_password(email, password): super_admin = SuperAdminService.get_by_email(email) return super_admin and super_admin.id def get_perm_admin_id(self): """Get super admin id. Both basic authorization and cookie are supported.""" if request.authorization: auth = request.authorization return self.get_perm_admin_id_by_auth(auth.username, auth.password) return self.get_perm_admin_id_from_session() def has_perm_admin_logined(self): """Return whether a super admin is currently authenticated.""" return bool(self.get_perm_admin_id()) def user_loader(self, callback): """Define user loader. Required if you plan to use perm admin dashboard. The callback will be used to render user basic information in dashboard. Callback must take `user_id` integer parameter. """ self.user_callback = callback return callback def current_user_loader(self, callback): """Define current user loader. Required if you plan to use decorator to protect your function. The callback will be used in deciding whether current user has authority. Callback takes no parameters. """ self.current_user_callback = callback return callback def users_loader(self, callback): """Define users loader. Required if you plan to use perm admin dashboard. The callback will be used to render whole user list in dashboard. Callback must take 5 parameters: * filter_by={}, * sort_field='created_at', * sort_dir='desc', * offset=0, * limit=20 """ self.users_callback = callback return callback def users_count_loader(self, callback): """Define users count loader. Required if you plan to use perm admin dashboard. The callback will be used in paginating user list. Callback takes no parameters.""" self.users_count_callback = callback return callback def load_user(self, user_id): if self.user_callback is None: raise NotImplementedError('You must register user_loader!') return self.user_callback(user_id) def load_current_user(self): if self.current_user_callback is None: raise NotImplementedError('You must register current_user_loader!') return self.current_user_callback() def load_users(self, filter_by={}, sort_field='created_at', sort_dir='desc', offset=0, limit=20): if self.users_callback is None: raise NotImplementedError('You must register users_loader!') return self.users_callback(**dict( filter_by=filter_by, sort_field=sort_field, sort_dir=sort_dir, offset=offset, limit=limit, )) def load_users_count(self): if self.users_count_callback is None: raise NotImplementedError('You must register users_count_loader!') return self.users_count_callback() def has_permission(self, user_id, code): """Decide whether a user has the permission identified by `code`. Codes are defined in perm admin dashboard.""" from .services import VerificationService, PermissionService permission = PermissionService.get_by_code(code) if not permission: return False return VerificationService.has_permission(user_id, permission.id) def has_permissions(self, user_id, *codes): """Decide whether a user has any of the permissions identified by `codes`. Codes are defined in perm admin dashboard.""" if not codes: return True if '*' in codes: return any( self.has_permission(user_id, code) for code in self.get_all_permission_codes() ) else: return any(self.has_permission(user_id, code) for code in codes) def get_user_permissions(self, user_id): """Return all permissions authorized to a user. Codes are defined in perm admin dashboard.""" from .services import VerificationService, PermissionService permission_ids = VerificationService.get_user_permissions(user_id) permissions = map(PermissionService.get, permission_ids) permissions = filter(None, permissions) permissions = map(PermissionService.rest, permissions) return permissions def get_all_permission_codes(self): """Get all permission codes. WARNING: this might have performance issues.""" from .services import PermissionService permissions = PermissionService.get_permissions() return [permission.code for permission in permissions] def is_user_in_groups(self, user_id, *groups): """Decide whether a user is in groups.
Groups are defined in perm admin dashboard.""" from .services import UserGroupService, UserGroupMemberService if not groups: return False if '*' in groups: user_group_ids = UserGroupService.get_all_user_group_ids() else: user_groups = UserGroupService.get_user_groups_by_codes(groups) user_group_ids = [user_group.id for user_group in user_groups] if not user_group_ids: return False return UserGroupMemberService.is_user_in_groups(user_id, user_group_ids) def require_group(self, *groups): """A decorator that can decide whether current user is in listed groups. Groups are defined in perm admin dashboard.""" from .services import UserGroupService, UserGroupMemberService def deco(func): @wraps(func) def _(*args, **kwargs): current_user = self.load_current_user() if not current_user: raise self.Denied current_user_id = current_user.id is_allowed = self.is_user_in_groups(current_user_id, *groups) if is_allowed: return func(*args, **kwargs) else: raise self.Denied return _ return deco def require_group_in_template(self, *groups): """Require group in template""" from .services import UserGroupService, UserGroupMemberService current_user = self.load_current_user() if not current_user: return False current_user_id = current_user.id return self.is_user_in_groups(current_user_id, *groups) def require_permission(self, *codes): """A decorator that can decide whether current user has listed permission codes. Codes are defined in perm admin dashboard.""" for code in codes: self.registered_permissions.add(code) def deco(func): @wraps(func) def _(*args, **kwargs): current_user = self.load_current_user() if not current_user: raise self.Denied is_allowed = self.has_permissions(current_user.id, *codes) if is_allowed: return func(*args, **kwargs) else: raise self.Denied return _ return deco def require_permission_in_template(self, *codes): """Require permission in template.""" for code in codes: self.registered_permissions.add(code) current_user = self.load_current_user() if not current_user: return False return self.has_permissions(current_user.id, *codes) def default_context_processors(self): return { 'require_permission': self.require_permission_in_template, 'require_group': self.require_group_in_template, } def register_commands(self, flask_script_manager): """Register several convinient Flask-Script commands. WARNING: make sure you have installed Flask-Script. :param flask_script_manager: a flask.ext.script.Manager object. """ from .script import perm_manager flask_script_manager.add_command('perm', perm_manager) def register_context_processors(self, app): """Register default context processors to app. """ app.context_processor(self.default_context_processors)
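A minimal wiring sketch for the extension above. Only the Perm API calls come from this module; the database URI value, the get_logged_in_user() helper, and the User model are hypothetical placeholders:

from flask import Flask

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'  # assumed DB config for db.create_all()
perm = Perm(app)

@perm.current_user_loader
def load_current_user():
    return get_logged_in_user()  # hypothetical helper from your auth layer

@perm.user_loader
def load_user(user_id):
    return User.query.get(user_id)  # hypothetical User model

@app.route('/secret')
@perm.require_permission('view.secret')
def secret():
    # Raises Perm.Denied unless the current user holds 'view.secret'.
    return 'only for users granted view.secret'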
PypiClean
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msgmerge.py
__revision__ = "src/engine/SCons/Tool/msgmerge.py 2014/07/05 09:42:21 garyo" ############################################################################# def _update_or_init_po_files(target, source, env): """ Action function for `POUpdate` builder """ import SCons.Action from SCons.Tool.GettextCommon import _init_po_files for tgt in target: if tgt.rexists(): action = SCons.Action.Action('$MSGMERGECOM', '$MSGMERGECOMSTR') else: action = _init_po_files status = action([tgt], source, env) if status : return status return 0 ############################################################################# ############################################################################# def _POUpdateBuilder(env, **kw): """ Create an object of `POUpdate` builder """ import SCons.Action from SCons.Tool.GettextCommon import _POFileBuilder action = SCons.Action.Action(_update_or_init_po_files, None) return _POFileBuilder(env, action=action, target_alias='$POUPDATE_ALIAS') ############################################################################# ############################################################################# from SCons.Environment import _null ############################################################################# def _POUpdateBuilderWrapper(env, target=None, source=_null, **kw): """ Wrapper for `POUpdate` builder - make user's life easier """ if source is _null: if 'POTDOMAIN' in kw: domain = kw['POTDOMAIN'] elif env.has_key('POTDOMAIN') and env['POTDOMAIN']: domain = env['POTDOMAIN'] else: domain = 'messages' source = [ domain ] # NOTE: Suffix shall be appended automatically return env._POUpdateBuilder(target, source, **kw) ############################################################################# ############################################################################# def generate(env,**kw): """ Generate the `xgettext` tool """ from SCons.Tool.GettextCommon import _detect_msgmerge try: env['MSGMERGE'] = _detect_msgmerge(env) except: env['MSGMERGE'] = 'msgmerge' env.SetDefault( POTSUFFIX = ['.pot'], POSUFFIX = ['.po'], MSGMERGECOM = '$MSGMERGE $MSGMERGEFLAGS --update $TARGET $SOURCE', MSGMERGECOMSTR = '', MSGMERGEFLAGS = [ ], POUPDATE_ALIAS = 'po-update' ) env.Append(BUILDERS = { '_POUpdateBuilder':_POUpdateBuilder(env) }) env.AddMethod(_POUpdateBuilderWrapper, 'POUpdate') env.AlwaysBuild(env.Alias('$POUPDATE_ALIAS')) ############################################################################# ############################################################################# def exists(env): """ Check if the tool exists """ from SCons.Tool.GettextCommon import _msgmerge_exists try: return _msgmerge_exists(env) except: return False ############################################################################# # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
PypiClean
/NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/data/classification/nlet/triplet_dataset.py
__author__ = "Christian Heider Nielsen" __doc__ = r""" Created on 30/06/2020 """ import random from pathlib import Path from typing import Tuple import numpy import torch __all__ = ["TripletDataset"] from draugr.torch_utilities import global_pin_memory from torch.utils.data import DataLoader from neodroidvision.data.classification.nlet import PairDataset class TripletDataset( PairDataset ): # TODO: Extract image specificity of class to a subclass and move this super pair class to a # general torch lib. """ # This dataset generates a triple of images. an image of a category, another of the same category and lastly one from another category""" def __getitem__(self, idx1: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ returns torch.tensors for img triplet, first tensor being idx random category, second being the same category with different index and third being of a random other category(Never the same) :param idx1: :type idx1: :return: :rtype:""" t1 = random.choice(self._dataset.category_names) while True: idx2 = random.randint(0, self._dataset.category_sizes[t1]) if idx1 != idx2: break while True: t2 = random.choice(self._dataset.category_names) if t1 != t2: break return ( self._dataset.sample(t1, idx1)[0], self._dataset.sample(t1, idx2)[0], self._dataset.sample( t2, random.randint(0, self._dataset.category_sizes[t2]) )[0], *((t1, t2, t1) if self.return_categories else ()), ) def sample(self, horizontal_merge: bool = False) -> None: """description""" dl = iter( DataLoader( self, batch_size=9, shuffle=True, num_workers=0, pin_memory=global_pin_memory(0), ) ) for _ in range(3): images1, images2, images3, *labels = next(dl) X1 = numpy.transpose(images1.numpy(), [0, 2, 3, 1]) X2 = numpy.transpose(images2.numpy(), [0, 2, 3, 1]) X3 = numpy.transpose(images3.numpy(), [0, 2, 3, 1]) if horizontal_merge: X = numpy.dstack((X1, X2, X3)) else: X = numpy.hstack((X1, X2, X3)) PairDataset.plot_images(X, list(zip(*labels))) if __name__ == "__main__": sd = TripletDataset(Path.home() / "Data" / "mnist_png", return_categories=True) print(sd.predictor_shape) print(sd.response_shape) sd.sample()
PypiClean
/Jin-Py-1.0.31.tar.gz/Jin-Py-1.0.31/jin/composer.py
import os import sys pushCommandTemplate = ["rsync", "-vzaPr"] clearCommandTemplate = ["rsync", "-vPr", "--delete", "--existing", "--ignore-existing"] class Composer(): """Composes an rsync command from system arguments and the .jinconfig file""" def __init__(self, args, config): self.args = args self.config = config self.isDryRun = False self.isHot = False self.minToWatch = 1 self.command = self.dispatchCommandFromType(self.args[0]) def checkConfigForRequired(self, command, config): if command == "push": if self.config.get("sloc") is None and self.config.get("sdir") is None: print(".jinconfig doesn't contain source information!") sys.exit(1) if self.config.get("dloc") is None and self.config.get("ddir") is None: print(".jinconfig doesn't contain destination information!") sys.exit(1) if self.config.get("wmin") is not None: self.minToWatch = int(self.config.get("wmin")[0]) if command == "empty": if self.config.get("dloc") is None and self.config.get("ddir") is None: print(".jinconfig doesn't contain destination information!") sys.exit(1) def dispatchCommandFromType(self, command): if command == "push": self.checkConfigForRequired(command, self.config) if len(self.args) > 1: for arg in self.args[1:]: if arg == "dry": pushCommandTemplate.append("--dry-run") self.isDryRun = True elif arg == "watch": self.isHot = True elif arg == "match-host": pushCommandTemplate.append("--delete") else: print(f"Invalid command '{arg}'!") sys.exit(1) return self.generatePushCommand() elif command == "empty": self.checkConfigForRequired(command, self.config) if len(self.args) > 1: for arg in self.args[1:]: if arg == "dry": clearCommandTemplate.append("--dry-run") self.isDryRun = True return self.generateClearCommand() else: print(f"Invalid command '{command}'! Exiting...") sys.exit(1) def generateClearCommand(self): command = clearCommandTemplate destination = "" if not os.path.exists("./.jin/_blank/"): print(".jin/_blank directory doesn't exist! Run 'jin make' to fix this.") sys.exit(1) if self.config.get("dloc") is None: destination = self.config.get("ddir")[0] else: destination = f"{self.config.get('dloc')[0]}:{self.config.get('ddir')[0]}" # command = rsync -vPr --delete --existing --ignore-existing .jin/_blank/ [destination] command.extend((".jin/_blank/", destination)) return command def generatePushCommand(self): command = pushCommandTemplate source = "" destination = "" ignoreSpecific = [] ignoreType = [] if self.config.get("sloc") is None: source = self.config.get("sdir")[0] else: source = f"{self.config.get('sloc')[0]}:{self.config.get('sdir')[0]}" if self.config.get("dloc") is None: destination = self.config.get("ddir")[0] else: destination = f"{self.config.get('dloc')[0]}:{self.config.get('ddir')[0]}" if self.config.get("igspec") is not None: for excludedSpecific in self.config.get("igspec"): ignoreSpecific.append("--exclude") ignoreSpecific.append(excludedSpecific) if self.config.get("igtype") is not None: for excludedType in self.config.get("igtype"): ignoreType.append("--exclude") ignoreType.append(excludedType) for spec in ignoreSpecific: command.append(spec) for spec in ignoreType: command.append(spec) # command = rsync -vzaPr --exclude [ignoreSpecific] --exclude [ignoreType] [source] [destination] command.extend((source, destination)) print(" ".join(command), "\n") return command
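A usage sketch for the Composer above; the paths are made up, and the config values are single-element lists because lookups such as self.config.get("sdir")[0] expect them that way:

config = {
    "sdir": ["./site/"],           # local source directory
    "dloc": ["user@example.com"],  # remote host
    "ddir": ["/var/www/site/"],    # remote destination directory
    "igtype": ["*.tmp"],           # file patterns to exclude
}
composer = Composer(["push", "dry"], config)
print(" ".join(composer.command))
# -> rsync -vzaPr --dry-run --exclude *.tmp ./site/ user@example.com:/var/www/site/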
PypiClean
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_tags_tag/put.py
from dataclasses import dataclass import typing_extensions import urllib3 from urllib3._collections import HTTPHeaderDict from firefly_iii_client import api_client, exceptions from datetime import date, datetime # noqa: F401 import decimal # noqa: F401 import functools # noqa: F401 import io # noqa: F401 import re # noqa: F401 import typing # noqa: F401 import typing_extensions # noqa: F401 import uuid # noqa: F401 import frozendict # noqa: F401 from firefly_iii_client import schemas # noqa: F401 from firefly_iii_client.model.tag_single import TagSingle from firefly_iii_client.model.validation_error import ValidationError from firefly_iii_client.model.unauthenticated import Unauthenticated from firefly_iii_client.model.bad_request import BadRequest from firefly_iii_client.model.tag_model_update import TagModelUpdate from firefly_iii_client.model.internal_exception import InternalException from firefly_iii_client.model.not_found import NotFound from . import path # Header params XTraceIdSchema = schemas.UUIDSchema RequestRequiredHeaderParams = typing_extensions.TypedDict( 'RequestRequiredHeaderParams', { } ) RequestOptionalHeaderParams = typing_extensions.TypedDict( 'RequestOptionalHeaderParams', { 'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ], }, total=False ) class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams): pass request_header_x_trace_id = api_client.HeaderParameter( name="X-Trace-Id", style=api_client.ParameterStyle.SIMPLE, schema=XTraceIdSchema, ) # Path params TagSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( 'RequestRequiredPathParams', { 'tag': typing.Union[TagSchema, str, ], } ) RequestOptionalPathParams = typing_extensions.TypedDict( 'RequestOptionalPathParams', { }, total=False ) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): pass request_path_tag = api_client.PathParameter( name="tag", style=api_client.ParameterStyle.SIMPLE, schema=TagSchema, required=True, ) # body param SchemaForRequestBodyApplicationJson = TagModelUpdate SchemaForRequestBodyApplicationXWwwFormUrlencoded = TagModelUpdate request_body_tag_model_update = api_client.RequestBody( content={ 'application/json': api_client.MediaType( schema=SchemaForRequestBodyApplicationJson), 'application/x-www-form-urlencoded': api_client.MediaType( schema=SchemaForRequestBodyApplicationXWwwFormUrlencoded), }, required=True, ) _auth = [ 'firefly_iii_auth', ] SchemaFor200ResponseBodyApplicationVndApijson = TagSingle @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor200ResponseBodyApplicationVndApijson, ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ 'application/vnd.api+json': api_client.MediaType( schema=SchemaFor200ResponseBodyApplicationVndApijson), }, ) SchemaFor400ResponseBodyApplicationJson = BadRequest @dataclass class ApiResponseFor400(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor400ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_400 = api_client.OpenApiResponse( response_cls=ApiResponseFor400, content={ 'application/json': api_client.MediaType( schema=SchemaFor400ResponseBodyApplicationJson), }, ) SchemaFor401ResponseBodyApplicationJson = Unauthenticated @dataclass class ApiResponseFor401(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ 
SchemaFor401ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_401 = api_client.OpenApiResponse( response_cls=ApiResponseFor401, content={ 'application/json': api_client.MediaType( schema=SchemaFor401ResponseBodyApplicationJson), }, ) SchemaFor404ResponseBodyApplicationJson = NotFound @dataclass class ApiResponseFor404(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor404ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_404 = api_client.OpenApiResponse( response_cls=ApiResponseFor404, content={ 'application/json': api_client.MediaType( schema=SchemaFor404ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = ValidationError @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor422ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ 'application/json': api_client.MediaType( schema=SchemaFor422ResponseBodyApplicationJson), }, ) SchemaFor500ResponseBodyApplicationJson = InternalException @dataclass class ApiResponseFor500(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor500ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_500 = api_client.OpenApiResponse( response_cls=ApiResponseFor500, content={ 'application/json': api_client.MediaType( schema=SchemaFor500ResponseBodyApplicationJson), }, ) _status_code_to_response = { '200': _response_for_200, '400': _response_for_400, '401': _response_for_401, '404': _response_for_404, '422': _response_for_422, '500': _response_for_500, } _all_accept_content_types = ( 'application/vnd.api+json', 'application/json', ) class BaseApi(api_client.Api): @typing.overload def _update_tag_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def _update_tag_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: typing_extensions.Literal["application/x-www-form-urlencoded"], header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... 
@typing.overload def _update_tag_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def _update_tag_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _update_tag_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def _update_tag_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = 'application/json', header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ Update existing tag. :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances """ self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params) self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) used_path = path.value _path_params = {} for parameter in ( request_path_tag, ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue serialized_data = parameter.serialize(parameter_data) _path_params.update(serialized_data) for k, v in _path_params.items(): used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() for parameter in ( request_header_x_trace_id, ): parameter_data = header_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue serialized_data = parameter.serialize(parameter_data) _headers.extend(serialized_data) # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_tag_model_update.serialize(body, content_type) _headers.add('Content-Type', content_type) if 'fields' in serialized_data: _fields = serialized_data['fields'] elif 'body' in serialized_data: _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, method='put'.upper(), headers=_headers, fields=_fields, body=_body, auth_settings=_auth, stream=stream, timeout=timeout, ) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: raise exceptions.ApiException( status=response.status, reason=response.reason, api_response=api_response ) return api_response class UpdateTag(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload def update_tag( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def update_tag( self, body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: typing_extensions.Literal["application/x-www-form-urlencoded"], header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def update_tag( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def update_tag( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def update_tag( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def update_tag( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = 'application/json', header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._update_tag_oapg( body=body, header_params=header_params, path_params=path_params, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization ) class ApiForput(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def put( self, body: typing.Union[SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: typing_extensions.Literal["application/x-www-form-urlencoded"], header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... 
@typing.overload def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = ..., header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,SchemaForRequestBodyApplicationXWwwFormUrlencoded,], content_type: str = 'application/json', header_params: RequestHeaderParams = frozendict.frozendict(), path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._update_tag_oapg( body=body, header_params=header_params, path_params=path_params, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization )
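A call sketch for the endpoint above. Only UpdateTag.update_tag(), TagModelUpdate, and the body/path parameter shapes come from this module; the Configuration/ApiClient wiring follows the usual pattern for generated OpenAPI clients and is an assumption, as are the host and token values:

import firefly_iii_client

# Assumed boilerplate for this generated client; host and token are placeholders.
configuration = firefly_iii_client.Configuration(host="https://demo.firefly-iii.org")
configuration.access_token = "YOUR_ACCESS_TOKEN"

with firefly_iii_client.ApiClient(configuration) as client:
    api = UpdateTag(client)  # any class mixing in BaseApi is constructed the same way
    response = api.update_tag(
        body=TagModelUpdate(tag="vacation-2023"),
        path_params={"tag": "vacation"},
    )
    print(response.body)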
PypiClean
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/htdocs/static/djblets/js/avatars/views/avatarSettingsFormView.js
"use strict"; (function () { const [readyPromise, resolve] = Promise.withResolver(); /** * A form for managing the settings of avatar services. * * This form lets you select the avatar service you wish to use, as well as * configure the settings for that avatar service. */ Djblets.Avatars.SettingsFormView = Backbone.View.extend({ events: { 'change #id_avatar_service_id': '_onServiceChanged', 'submit': '_onSubmit' }, /** * Initialize the form. */ initialize() { console.assert(Djblets.Avatars.SettingsFormView.instance === null); Djblets.Avatars.SettingsFormView.instance = this; this._configForms = new Map(); this._$config = this.$('.avatar-service-configuration'); const services = this.model.get('services'); this.listenTo(this.model, 'change:serviceID', () => this._showHideForms()); /* * The promise continuations will only be executed once the stack is * unwound. */ resolve(); }, /** * Validate the current form upon submission. * * Args: * e (Event): * The form submission event. */ _onSubmit(e) { const serviceID = this.model.get('serviceID'); const currentForm = this._configForms.get(serviceID); if (currentForm && !currentForm.validate()) { e.preventDefault(); } }, /** * Render the child forms. * * This will show the for the currently selected service if it has one. * * Returns: * Djblets.Avatars.SettingsFormView: * This view (for chaining). */ renderForms() { for (const form of this._configForms.values()) { form.render(); } /* * Ensure that if the browser sets the value of the <select> upon * refresh that we update the model accordingly. */ this.$('#id_avatar_service_id').change(); this._showHideForms(true); return this; }, /** * Show or hide the configuration form. */ _showHideForms() { const services = this.model.get('services'); const serviceID = this.model.get('serviceID'); const currentForm = this._configForms.get(serviceID); const previousID = this.model.previous('serviceID'); const previousForm = previousID ? this._configForms.get(previousID) : undefined; if (previousForm && currentForm) { previousForm.$el.hide(); currentForm.$el.show(); } else if (previousForm) { previousForm.$el.hide(); this._$config.hide(); } else if (currentForm) { currentForm.$el.show(); this._$config.show(); } }, /** * Handle the service being changed. * * Args: * e (Event): * The change event. */ _onServiceChanged(e) { const $target = $(e.target); this.model.set('serviceID', $target.val()); } }, { /** * The form instance. */ instance: null, /** * Add a configuration form to the instance. * * Args: * serviceID (string): * The unique ID for the avatar service. * * formClass (constructor): * The view to use for the form. */ addConfigForm(serviceID, formClass) { Djblets.Avatars.SettingsFormView.instance._configForms.set(serviceID, new formClass({ el: $(`[data-avatar-service-id="${serviceID}"]`), model: Djblets.Avatars.SettingsFormView.instance.model })); }, /** * A promise that is resolved when the avatar services form has been * initialized. */ ready: readyPromise }); })(); //# sourceMappingURL=avatarSettingsFormView.js.map
PypiClean
/MIAvisual-0.0.6-py3-none-any.whl/matplotlib/type1font.py
import binascii import enum import itertools import logging import re import struct import numpy as np from matplotlib.cbook import _format_approx from . import _api _log = logging.getLogger(__name__) # token types _TokenType = enum.Enum('_TokenType', 'whitespace name string delimiter number') class Type1Font: """ A class representing a Type-1 font, for use by backends. Attributes ---------- parts : tuple A 3-tuple of the cleartext part, the encrypted part, and the finale of zeros. decrypted : bytes The decrypted form of parts[1]. prop : dict[str, Any] A dictionary of font properties. """ __slots__ = ('parts', 'decrypted', 'prop') def __init__(self, input): """ Initialize a Type-1 font. Parameters ---------- input : str or 3-tuple Either a pfb file name, or a 3-tuple of already-decoded Type-1 font `~.Type1Font.parts`. """ if isinstance(input, tuple) and len(input) == 3: self.parts = input else: with open(input, 'rb') as file: data = self._read(file) self.parts = self._split(data) self.decrypted = self._decrypt(self.parts[1], 'eexec') self._parse() def _read(self, file): """Read the font from a file, decoding into usable parts.""" rawdata = file.read() if not rawdata.startswith(b'\x80'): return rawdata data = b'' while rawdata: if not rawdata.startswith(b'\x80'): raise RuntimeError('Broken pfb file (expected byte 128, ' 'got %d)' % rawdata[0]) type = rawdata[1] if type in (1, 2): length, = struct.unpack('<i', rawdata[2:6]) segment = rawdata[6:6 + length] rawdata = rawdata[6 + length:] if type == 1: # ASCII text: include verbatim data += segment elif type == 2: # binary data: encode in hexadecimal data += binascii.hexlify(segment) elif type == 3: # end of file break else: raise RuntimeError('Unknown segment type %d in pfb file' % type) return data def _split(self, data): """ Split the Type 1 font into its three main parts. The three parts are: (1) the cleartext part, which ends in a eexec operator; (2) the encrypted part; (3) the fixed part, which contains 512 ASCII zeros possibly divided on various lines, a cleartomark operator, and possibly something else. """ # Cleartext part: just find the eexec and skip whitespace idx = data.index(b'eexec') idx += len(b'eexec') while data[idx] in b' \t\r\n': idx += 1 len1 = idx # Encrypted part: find the cleartomark operator and count # zeros backward idx = data.rindex(b'cleartomark') - 1 zeros = 512 while zeros and data[idx] in b'0' or data[idx] in b'\r\n': if data[idx] in b'0': zeros -= 1 idx -= 1 if zeros: # this may have been a problem on old implementations that # used the zeros as necessary padding _log.info('Insufficiently many zeros in Type 1 font') # Convert encrypted part to binary (if we read a pfb file, we may end # up converting binary to hexadecimal to binary again; but if we read # a pfa file, this part is already in hex, and I am not quite sure if # even the pfb format guarantees that it will be in binary). idx1 = len1 + ((idx - len1 + 2) & ~1) # ensure an even number of bytes binary = binascii.unhexlify(data[len1:idx1]) return data[:len1], binary, data[idx+1:] _whitespace_or_comment_re = re.compile(br'[\0\t\r\014\n ]+|%[^\r\n\v]*') _token_re = re.compile(br'/{0,2}[^]\0\t\r\v\n ()<>{}/%[]+') _instring_re = re.compile(br'[()\\]') @staticmethod def _decrypt(ciphertext, key, ndiscard=4): """ Decrypt ciphertext using the Type-1 font algorithm The algorithm is described in Adobe's "Adobe Type 1 Font Format". 
The key argument can be an integer, or one of the strings 'eexec' and 'charstring', which map to the key specified for the corresponding part of Type-1 fonts. The ndiscard argument should be an integer, usually 4. That number of bytes is discarded from the beginning of plaintext. """ key = _api.check_getitem({'eexec': 55665, 'charstring': 4330}, key=key) plaintext = [] for byte in ciphertext: plaintext.append(byte ^ (key >> 8)) key = ((key+byte) * 52845 + 22719) & 0xffff return bytes(plaintext[ndiscard:]) @staticmethod def _encrypt(plaintext, key, ndiscard=4): """ Encrypt plaintext using the Type-1 font algorithm The algorithm is described in Adobe's "Adobe Type 1 Font Format". The key argument can be an integer, or one of the strings 'eexec' and 'charstring', which map to the key specified for the corresponding part of Type-1 fonts. The ndiscard argument should be an integer, usually 4. That number of bytes is prepended to the plaintext before encryption. This function prepends NUL bytes for reproducibility, even though the original algorithm uses random bytes, presumably to avoid cryptanalysis. """ key = _api.check_getitem({'eexec': 55665, 'charstring': 4330}, key=key) ciphertext = [] for byte in b'\0' * ndiscard + plaintext: c = byte ^ (key >> 8) ciphertext.append(c) key = ((key + c) * 52845 + 22719) & 0xffff return bytes(ciphertext) @classmethod def _tokens(cls, text): """ A PostScript tokenizer. Yield (token, value) pairs such as (_TokenType.whitespace, ' ') or (_TokenType.name, '/Foobar'). """ # Preload enum members for speed. tok_whitespace = _TokenType.whitespace tok_name = _TokenType.name tok_string = _TokenType.string tok_delimiter = _TokenType.delimiter tok_number = _TokenType.number pos = 0 while pos < len(text): match = cls._whitespace_or_comment_re.match(text, pos) if match: yield (tok_whitespace, match.group()) pos = match.end() elif text[pos:pos+1] == b'(': start = pos pos += 1 depth = 1 while depth: match = cls._instring_re.search(text, pos) if match is None: return pos = match.end() if match.group() == b'(': depth += 1 elif match.group() == b')': depth -= 1 else: # a backslash - skip the next character pos += 1 yield (tok_string, text[start:pos]) elif text[pos:pos + 2] in (b'<<', b'>>'): yield (tok_delimiter, text[pos:pos + 2]) pos += 2 elif text[pos:pos+1] == b'<': start = pos pos = text.index(b'>', pos) yield (tok_string, text[start:pos]) else: match = cls._token_re.match(text, pos) if match: try: float(match.group()) yield (tok_number, match.group()) except ValueError: yield (tok_name, match.group()) pos = match.end() else: yield (tok_delimiter, text[pos:pos + 1]) pos += 1 def _parse(self): """ Find the values of various font properties. This limited kind of parsing is described in Chapter 10 "Adobe Type Manager Compatibility" of the Type-1 spec. """ # Preload enum members for speed. tok_whitespace = _TokenType.whitespace tok_name = _TokenType.name tok_string = _TokenType.string tok_number = _TokenType.number # Start with reasonable defaults prop = {'weight': 'Regular', 'ItalicAngle': 0.0, 'isFixedPitch': False, 'UnderlinePosition': -100, 'UnderlineThickness': 50} filtered = ((token, value) for token, value in self._tokens(self.parts[0]) if token is not tok_whitespace) # The spec calls this an ASCII format; in Python 2.x we could # just treat the strings and names as opaque bytes but let's # turn them into proper Unicode, and be lenient in case of high bytes. 
def convert(x): return x.decode('ascii', 'replace') for token, value in filtered: if token is tok_name and value.startswith(b'/'): key = convert(value[1:]) token, value = next(filtered) if token is tok_name: if value in (b'true', b'false'): value = value == b'true' else: value = convert(value.lstrip(b'/')) elif token is tok_string: value = convert(value.lstrip(b'(').rstrip(b')')) elif token is tok_number: if b'.' in value: value = float(value) else: value = int(value) else: # more complicated value such as an array value = None if key != 'FontInfo' and value is not None: prop[key] = value # Fill in the various *Name properties if 'FontName' not in prop: prop['FontName'] = (prop.get('FullName') or prop.get('FamilyName') or 'Unknown') if 'FullName' not in prop: prop['FullName'] = prop['FontName'] if 'FamilyName' not in prop: extras = ('(?i)([ -](regular|plain|italic|oblique|(semi)?bold|' '(ultra)?light|extra|condensed))+$') prop['FamilyName'] = re.sub(extras, '', prop['FullName']) self.prop = prop @classmethod def _transformer(cls, tokens, slant, extend): tok_whitespace = _TokenType.whitespace tok_name = _TokenType.name def fontname(name): result = name if slant: result += b'_Slant_%d' % int(1000 * slant) if extend != 1.0: result += b'_Extend_%d' % int(1000 * extend) return result def italicangle(angle): return b'%a' % round( float(angle) - np.arctan(slant) / np.pi * 180, 5 ) def fontmatrix(array): array = array.lstrip(b'[').rstrip(b']').split() array = [float(x) for x in array] oldmatrix = np.eye(3, 3) oldmatrix[0:3, 0] = array[::2] oldmatrix[0:3, 1] = array[1::2] modifier = np.array([[extend, 0, 0], [slant, 1, 0], [0, 0, 1]]) newmatrix = np.dot(modifier, oldmatrix) array[::2] = newmatrix[0:3, 0] array[1::2] = newmatrix[0:3, 1] return ( '[%s]' % ' '.join(_format_approx(x, 6) for x in array) ).encode('ascii') def replace(fun): def replacer(tokens): token, value = next(tokens) # name, e.g., /FontMatrix yield value token, value = next(tokens) # possible whitespace while token is tok_whitespace: yield value token, value = next(tokens) if value != b'[': # name/number/etc. yield fun(value) else: # array, e.g., [1 2 3] result = b'' while value != b']': result += value token, value = next(tokens) result += value yield fun(result) return replacer def suppress(tokens): for _ in itertools.takewhile(lambda x: x[1] != b'def', tokens): pass yield b'' table = {b'/FontName': replace(fontname), b'/ItalicAngle': replace(italicangle), b'/FontMatrix': replace(fontmatrix), b'/UniqueID': suppress} for token, value in tokens: if token is tok_name and value in table: yield from table[value]( itertools.chain([(token, value)], tokens)) else: yield value def transform(self, effects): """ Return a new font that is slanted and/or extended. Parameters ---------- effects : dict A dict with optional entries: - 'slant' : float, default: 0 Tangent of the angle that the font is to be slanted to the right. Negative values slant to the left. - 'extend' : float, default: 1 Scaling factor for the font width. Values less than 1 condense the glyphs. Returns ------- `Type1Font` """ tokenizer = self._tokens(self.parts[0]) transformed = self._transformer(tokenizer, slant=effects.get('slant', 0.0), extend=effects.get('extend', 1.0)) return Type1Font((b"".join(transformed), self.parts[1], self.parts[2]))
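# --- Usage sketch (illustrative; not part of the original module) ---
# The eexec cipher above is symmetric apart from the ndiscard lead-in
# bytes, so a round trip through _encrypt()/_decrypt() recovers the input.
# The payload bytes are arbitrary test data.
if __name__ == '__main__':
    payload = b'/some charstring data'
    cipher = Type1Font._encrypt(payload, 'eexec')
    # _encrypt prepends 4 NUL bytes before encrypting; _decrypt discards
    # the first 4 plaintext bytes, so the original data comes back intact.
    assert Type1Font._decrypt(cipher, 'eexec') == payload
    # To slant/extend a real font file ("cmr10.pfb" is a placeholder path):
    # Type1Font('cmr10.pfb').transform({'slant': 0.2, 'extend': 0.9})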
PypiClean
/NucDetect-0.18.3-py3-none-any.whl/core/roi/ROI.py
from __future__ import annotations import hashlib import math import warnings from typing import Union, Dict, List, Tuple, Iterable import numpy as np from numba.typed import List as numList from core.roi.AreaAnalysis import get_bounding_box, get_center, get_surface, get_ellipse_radii, get_orientation_angle, \ get_orientation_vector, get_eccentricity, get_ovality class ROI: __slots__ = [ "main", "ident", "auto", "area", "dims", "stats", "ell_params", # ellipse parameters "length", "associated", "id", "marked" ] def __init__(self, main: bool = True, channel: str = "Blue", auto: bool = True, associated: Union[ROI, None] = None, marked: bool = False): """ Constructor of ROI class :param main: Indicates that this roi is on the main channel :param channel: Name of the channel :param auto: Indicates if the roi was automatically generated :param associated: The ROI this ROI is associated with :param marked: Convenience flag for processing """ self.main = main self.ident = channel self.auto = auto self.dims = {} self.area = [] self.stats = {} self.ell_params = {} self.length = -1 self.associated = associated self.marked = marked self.id = None def __add__(self, other): if isinstance(other, ROI): self.add_to_area(other.area) else: raise AttributeError("Addition only supported for ROI class!") def __eq__(self, other: ROI): if isinstance(other, ROI): return set(self.area) == set(other.area) else: return False def __ne__(self, other): if not isinstance(other, ROI): return True else: return not self.__eq__(other) def __gt__(self, other): if not isinstance(other, ROI): return False else: if len(self) > len(other): return True return False def __lt__(self, other): if not isinstance(other, ROI): return False else: if len(self) < len(other): return True return False def __len__(self): if self.length == -1: self.length = np.sum([x[2] for x in self.area]) return self.length else: return self.length def __hash__(self): if not self.id: md5 = hashlib.md5() ident = f"{self.ident}{self.area}".encode() md5.update(ident) self.id = int(f"0x{md5.hexdigest()}", 0) return self.id def merge(self, roi: ROI) -> None: """ Method to merge this roi with another ROI :param roi: The roi to merge with this :return: None """ if isinstance(roi, ROI): if roi.ident == self.ident: self.add_to_area(roi.area) self.id = None self.dims.clear() self.stats.clear() self.ell_params.clear() else: warnings.warn(f"The ROI {hash(self)} and " f"{hash(roi)} have different channel IDs!({self.ident}, {roi.ident})") else: raise ValueError("Not an ROI") def reset_stored_values(self) -> None: """ Method to reset the calculated id, stored dimensions, statistics and ellipse parameters :return: None """ self.id = None self.dims.clear() self.stats.clear() self.ell_params.clear() self.length = -1 self.calculate_dimensions() def set_area(self, rle: Iterable) -> None: """ Method to define the area of this ROI :param rle: run length encoded area :return: None """ self.area.clear() self.add_to_area(rle) def add_to_area(self, rle): """ Method to extend the area of this ROI with the given area :param rle: RL encoded area to add to this ROI :return: None """ self.area.extend(rle) self.reset_stored_values() def calculate_ellipse_parameters(self) -> Union[Dict[str, Union[int, float, Tuple, None]]]: """ Method to calculate the ellipse parameters of this ROI. :return: A dictionary containing the calculated parameters. 
None, if the ROI is not main """ # Check if the current ROI is main, else warn if not self.main: warnings.warn(f"Ellipse Parameter Calculation: ROI {hash(self)} is not marked as main") return {"center": None, "major_axis": None, "minor_axis": None, "angle": None, "orientation": None, "area": None, "shape_match": None, "eccentricity": None, "roundness": None} # Check if the parameters are already calculated if not self.ell_params: numba_area = numList(self.area) r_maj, r_min = get_ellipse_radii(numba_area) or_vec = get_orientation_vector(numba_area) angle = get_orientation_angle(numba_area) center = get_center(numba_area) area = get_surface(numba_area) self.ell_params["center"] = center self.ell_params["major_axis"] = r_maj self.ell_params["minor_axis"] = r_min self.ell_params["angle"] = - (math.degrees(angle) - 45) self.ell_params["orientation"] = or_vec self.ell_params["area"] = r_min * r_maj * math.pi self.ell_params["shape_match"] = self.ell_params["area"] / area self.ell_params["eccentricity"] = get_eccentricity(numba_area) self.ell_params["roundness"] = get_ovality(numba_area) return self.ell_params def calculate_roi_intersection(self, roi: ROI) -> float: """ Method to calculate the intersection ratio to another ROI :param roi: The other ROI :return: The degree of intersection as float """ max_intersection = min(len(self), len(roi)) intersection = set(self.area).intersection(set(roi.area)) return len(intersection) / max_intersection def calculate_dimensions(self) -> Dict[str, Union[int, float]]: """ Method to calculate the dimension of this roi :return: The calculated dimensions as dict """ if not self.dims: if self.area: numba_area = numList() # Add elements to area [numba_area.append(x) for x in self.area] y, x, height, width = get_bounding_box(numba_area) center = get_center(numba_area) area = get_surface(numba_area) self.dims["minX"] = x self.dims["maxX"] = x + width self.dims["minY"] = y self.dims["maxY"] = y + height self.dims["width"] = width self.dims["height"] = height self.dims["center"] = center self.dims["area"] = area else: raise Exception(f"ROI {self.id} does not contain any points!") return self.dims def extract_area_intensity(self, channel: np.ndarray) -> List[Union[int, float]]: """ Method to extract the intensity values of this roi from the given channel :param channel: The channel to extract the values from :return: The extracted values as list """ vals = [] for row in self.area: # Iterate over saved points for x in range(row[2]): vals.append( channel[row[0]][row[1] + x] ) return vals def calculate_statistics(self, channel: np.ndarray) -> Dict[str, Union[int, float]]: """ Method to calculate statistics for this roi :param channel: The channel this ROI is derived from :return: The calculated statistics """ if not self.stats: # Extract values from channel vals = self.extract_area_intensity(channel) self.stats = { "area": int(np.sum([x[2] for x in self.area])), "intensity average": float(np.average(vals)), "intensity median": float(np.median(vals)), "intensity maximum": int(np.amax(vals)), "intensity minimum": int(np.amin(vals)), "intensity std": float(np.std(vals)) } return self.stats def __str__(self): return f"ROI {self.id} - Channel: {self.ident} - Main: {self.main}"
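# --- Usage sketch (illustrative; not part of the original module) ---
# The run-length encoding used for `area` is implicit in
# extract_area_intensity(): each entry is a (row, start_column, run_length)
# triple. Assumes the numba-backed AreaAnalysis helpers compile as usual;
# numpy is already imported as np at the top of this module.
if __name__ == '__main__':
    roi = ROI(channel="Blue")
    roi.set_area([(10, 5, 4), (11, 5, 4)])  # two 4-pixel runs
    print(len(roi))  # 8 pixels in total

    channel = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
    print(roi.calculate_statistics(channel)["intensity average"])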
PypiClean
/BlueWhale3_Network-1.4.2-cp38-cp38-macosx_10_9_x86_64.whl/orangecontrib/network/widgets/OWNxClustering.py
from Orange.data import Table from Orange.data.util import get_unique_names from Orange.widgets import gui, widget, settings from Orange.widgets.widget import Input, Output from orangecontrib.network import Network from orangecontrib.network.network import community as cd from orangewidget.settings import rename_setting from orangecontrib.network.i18n_config import * def __(key): return i18n.t('network.OWNxClustering.' + key) class OWNxClustering(widget.OWWidget): name = __('name') description = __('desc') icon = "icons/NetworkClustering.svg" priority = 6430 class Inputs: network = Input("Network", Network, default=True, label=i18n.t("network.common.network")) class Outputs: network = Output("Network", Network, label=i18n.t("network.common.network")) items = Output("Items", Table, label=i18n.t("network.common.items")) resizing_enabled = False want_main_area = False settings_version = 2 attenuate = settings.Setting(False) iterations = settings.Setting(1000) use_random_state = settings.Setting(False) hop_attenuation = settings.Setting(0.1) auto_apply = settings.Setting(True) def __init__(self): super().__init__() self.net = None self.cluster_feature = None box = gui.vBox(self.controlArea, __("box.label_pro")) gui.spin( box, self, "iterations", 1, 100000, 1, label=__("label.max_iter"), callback=self.commit) gui.doubleSpin(box, self, "hop_attenuation", 0, 1, 0.01, label=__("label.apply"), checked="attenuate", callback=self.commit) self.random_state = gui.checkBox( box, self, "use_random_state", label=__("label.replicable"), callback=self.commit) gui.auto_apply(self.controlArea, self) @Inputs.network def set_network(self, net): self.net = net self.commit() def commit(self): kwargs = {'iterations': self.iterations} if self.attenuate: alg = cd.label_propagation_hop_attenuation kwargs['delta'] = self.hop_attenuation else: alg = cd.label_propagation if self.net is None: self.Outputs.items.send(None) self.Outputs.network.send(None) self.cluster_feature = None return if self.use_random_state: kwargs['seed'] = 0 labels = alg(self.net, **kwargs) domain = self.net.nodes.domain # Tie a name for presenting clustering results to the widget instance if self.cluster_feature is None: self.cluster_feature = get_unique_names(domain, 'Cluster') net = self.net.copy() cd.add_results_to_items(net, labels, self.cluster_feature) self.Outputs.items.send(net.nodes) self.Outputs.network.send(net) nclusters = len(set(labels.values())) @classmethod def migrate_settings(cls, settings, version): if version < 2: rename_setting(settings, "autoApply", "auto_apply") if "method" in settings: rename_setting(settings, "method", "attenuate") if __name__ == "__main__": from Orange.widgets.utils.widgetpreview import WidgetPreview from orangecontrib.network.network.readwrite \ import read_pajek, transform_data_to_orange_table from os.path import join, dirname fname = join(dirname(dirname(__file__)), 'networks', 'leu_by_genesets.net') network = read_pajek(fname) transform_data_to_orange_table(network) WidgetPreview(OWNxClustering).run(set_network=network)
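# --- Illustrative sketch (not part of the original widget) ---
# commit() above is a thin wrapper over the community-detection helpers,
# which also work without the GUI. This mirrors the calls made in commit()
# and in the __main__ block; "example.net" is a placeholder path.
def _cluster_example():
    from orangecontrib.network.network.readwrite import (
        read_pajek, transform_data_to_orange_table)

    net = read_pajek("example.net")
    transform_data_to_orange_table(net)  # attach an Orange item table
    labels = cd.label_propagation(net, iterations=1000, seed=0)  # replicable
    clustered = net.copy()
    cd.add_results_to_items(clustered, labels, "Cluster")
    return clustered, len(set(labels.values()))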
PypiClean
/AnalysisProjectDependencies-0.1.tar.gz/AnalysisProjectDependencies-0.1/bower_components/cytoscape/snippets/images.js
yourDiv.style.left = 0; yourDiv.style.top = 0; yourDiv.style.width = "100%"; yourDiv.style.height = "100%"; yourDiv.style.position = "absolute"; var cytoscape = require("cytoscape"); var cy = cytoscape({ container: yourDiv, style: cytoscape.stylesheet() .selector('node') .style({ 'height': 80, 'width': 80, 'background-fit': 'cover', 'border-color': '#000', 'border-width': 3, 'border-opacity': 0.5 }) .selector('.eating') .style({ 'border-color': 'red' }) .selector('.eater') .style({ 'border-width': 9 }) .selector('edge') .style({ 'width': 6, 'target-arrow-shape': 'triangle', 'line-color': '#ffaaaa', 'target-arrow-color': '#ffaaaa' }) .selector('#bird') .style({ 'background-image': 'https://farm8.staticflickr.com/7272/7633179468_3e19e45a0c_b.jpg' }) .selector('#cat') .style({ 'background-image': 'https://farm2.staticflickr.com/1261/1413379559_412a540d29_b.jpg' }) .selector('#ladybug') .style({ 'background-image': 'https://farm4.staticflickr.com/3063/2751740612_af11fb090b_b.jpg' }) .selector('#aphid') .style({ 'background-image': 'https://farm9.staticflickr.com/8316/8003798443_32d01257c8_b.jpg' }) .selector('#rose') .style({ 'background-image': 'https://farm6.staticflickr.com/5109/5817854163_eaccd688f5_b.jpg' }) .selector('#grasshopper') .style({ 'background-image': 'https://farm7.staticflickr.com/6098/6224655456_f4c3c98589_b.jpg' }) .selector('#plant') .style({ 'background-image': 'https://farm1.staticflickr.com/231/524893064_f49a4d1d10_z.jpg' }) .selector('#wheat') .style({ 'background-image': 'https://farm3.staticflickr.com/2660/3715569167_7e978e8319_b.jpg' }), elements: { nodes: [ { data: { id: 'cat' } }, { data: { id: 'bird' } }, { data: { id: 'ladybug' } }, { data: { id: 'aphid' } }, { data: { id: 'rose' } }, { data: { id: 'grasshopper' } }, { data: { id: 'plant' } }, { data: { id: 'wheat' } } ], edges: [ { data: { source: 'cat', target: 'bird' } }, { data: { source: 'bird', target: 'ladybug' } }, { data: { source: 'bird', target: 'grasshopper' } }, { data: { source: 'grasshopper', target: 'plant' } }, { data: { source: 'grasshopper', target: 'wheat' } }, { data: { source: 'ladybug', target: 'aphid' } }, { data: { source: 'aphid', target: 'rose' } } ] }, layout: { name: 'breadthfirst', directed: true, padding: 10 } }); // cy init cy.on('tap', 'node', function(){ var nodes = this; var tapped = nodes; var food = []; nodes.addClass('eater'); for(;;){ var connectedEdges = nodes.connectedEdges(function( edge ){ return !edge.target().anySame( nodes ); }); var connectedNodes = connectedEdges.targets(); Array.prototype.push.apply( food, connectedNodes ); nodes = connectedNodes; if( nodes.empty() ){ break; } } var delay = 0; var duration = 500; for( var i = food.length - 1; i >= 0; i-- ){ (function(){ var thisFood = food[i]; var eater = thisFood.connectedEdges(function( edge ){ return edge.target().same(thisFood); }).source(); thisFood.delay( delay, function(){ eater.addClass('eating'); } ).animate({ position: eater.position(), css: { 'width': 10, 'height': 10, 'border-width': 0, 'opacity': 0 } }, { duration: duration, complete: function(){ thisFood.remove(); } }); delay += duration; })(); } // for }); // on tap
PypiClean
/Beer_Advocate_API-1.1.0.tar.gz/Beer_Advocate_API-1.1.0/BA/user.py
import requests from BA.tools import reg_sandwich, is_date, ba_parser, ba_base class User: """Stores and retrieves data from Beer Advocate for a particular user Attributes ---------- username : str The user's username user_id : str The user's user_id if the user's profile page is public url : str The full URL to the user's profile page reviews : list Contains first page of user's reviews info : dict Dictionary containing user's info if profile page is public """ def __init__(self, username): self.username = username self.user_id = '' first_review_page = ba_base + '/user/beers/?ba=' + username self.reviews = self._get_pagereviews(first_review_page, first_call = True) self.info = self._get_info() def _get_info(self): """Returns user info if user has a profile page""" parser = ba_parser(attributes = ['class'], vals = ['mainProfileColumn', 'secondaryContent pairsJustified']) page_html = requests.get(self.url).text parser.feed(page_html) parser.clean_lines() user_dat = parser.lines user_info = dict() index = 0 while index in range(len(user_dat)): if user_dat[index] == 'Likes Received:': user_info.update({'Likes Received': user_dat[index + 1]}) self.username = user_dat[index + 2] break elif user_dat[index] == ':': key = user_dat[index - 1] val = user_dat[index + 1] user_info.update({key: val}) elif ':' in user_dat[index]: key = user_dat[index][:-1] val = user_dat[index + 1] user_info.update({key: val}) index += 1 if user_info == {}: self.private = True return 'Sorry, either this user is private or does not exist' self.private = False return user_info def _get_pagereviews(self, url, first_call = False): """ Returns all reviews on page """ parser = ba_parser(tags = ['div'],vals = ['ba-content'], save_comments = True, save_urls = True) page_html = requests.get(url).text parser.feed(page_html) parser.clean_lines() review_dat = parser.lines index = 0 reviews = [] while index in range(len(review_dat)): if is_date(review_dat[index], hyphened = True): if '%' in review_dat[index + 6]: reviews.append(review_dat[index:index + 7]) index += 7 else: index += 1 if first_call: self.last_page = 0 for url in parser.urls: if 'start=' in url: page_num = int(reg_sandwich('start=', '&', url)) if page_num > self.last_page: self.last_page = page_num for comment in parser.comments: if 'user_id' in comment: self.user_id = reg_sandwich('id=', '&', comment) break self.url = ba_base + '/community/members/'+ self.username + '.' \ + self.user_id return reviews def get_reviews(self, n_most_recent = 0): """Gets reviews by the user Parameters ---------- If n_most_recent is 0 (default), all reviews are returned. Otherwise, n_most_recent is rounded up to the nearest 50th and the result is the number of ratings that are returned, or the total number (whichever is smaller) Returns ------- list The result is a list of lists, where each element is a review by the User """ n_most_recent = self.last_page if n_most_recent == 0 else ((n_most_recent - 1)//50)*50 all_reviews = self.reviews[:] for page_num in range(50,n_most_recent + 1, 50): url = ba_base + '/user/beers/?start=' + str(page_num)\ + '&ba=' + self.username + '&order=dateD&view=R' all_reviews += self._get_pagereviews(url) return all_reviews
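# --- Usage sketch (illustrative; needs network access to Beer Advocate,
# and "SomeUser" is a placeholder username) ---
if __name__ == '__main__':
    user = User("SomeUser")
    if not user.private:
        print(user.info)  # profile fields scraped from the member page
    # ~100 most recent reviews, fetched in pages of 50
    for review in user.get_reviews(n_most_recent=100)[:3]:
        print(review)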
PypiClean
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/kitchen_sink/static/ks/ckeditor/lang/en-ca.js
/* Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.lang['en-ca']={"editor":"Rich Text Editor","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Press ALT 0 for help","browseServer":"Browse Server","url":"URL","protocol":"Protocol","upload":"Upload","uploadSubmit":"Send it to the Server","image":"Image","flash":"Flash","form":"Form","checkbox":"Checkbox","radio":"Radio Button","textField":"Text Field","textarea":"Textarea","hiddenField":"Hidden Field","button":"Button","select":"Selection Field","imageButton":"Image Button","notSet":"<not set>","id":"Id","name":"Name","langDir":"Language Direction","langDirLtr":"Left to Right (LTR)","langDirRtl":"Right to Left (RTL)","langCode":"Language Code","longDescr":"Long Description URL","cssClass":"Stylesheet Classes","advisoryTitle":"Advisory Title","cssStyle":"Style","ok":"OK","cancel":"Cancel","close":"Close","preview":"Preview","resize":"Resize","generalTab":"General","advancedTab":"Advanced","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"Some of the options have been changed. Are you sure to close the dialog?","options":"Options","target":"Target","targetNew":"New Window (_blank)","targetTop":"Topmost Window (_top)","targetSelf":"Same Window (_self)","targetParent":"Parent Window (_parent)","langDirLTR":"Left to Right (LTR)","langDirRTL":"Right to Left (RTL)","styles":"Style","cssClasses":"Stylesheet Classes","width":"Width","height":"Height","align":"Align","alignLeft":"Left","alignRight":"Right","alignCenter":"Centre","alignTop":"Top","alignMiddle":"Middle","alignBottom":"Bottom","invalidValue":"Invalid value.","invalidHeight":"Height must be a number.","invalidWidth":"Width must be a number.","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, unavailable</span>"},"about":{"copy":"Copyright &copy; $1. All rights reserved.","dlgTitle":"About CKEditor","help":"Check $1 for help.","moreInfo":"For licensing information please visit our web site:","title":"About CKEditor","userGuide":"CKEditor User's Guide"},"basicstyles":{"bold":"Bold","italic":"Italic","strike":"Strike Through","subscript":"Subscript","superscript":"Superscript","underline":"Underline"},"bidi":{"ltr":"Text direction from left to right","rtl":"Text direction from right to left"},"blockquote":{"toolbar":"Block Quote"},"clipboard":{"copy":"Copy","copyError":"Your browser security settings don't permit the editor to automatically execute copying operations. Please use the keyboard for that (Ctrl/Cmd+C).","cut":"Cut","cutError":"Your browser security settings don't permit the editor to automatically execute cutting operations. 
Please use the keyboard for that (Ctrl/Cmd+X).","paste":"Paste","pasteArea":"Paste Area","pasteMsg":"Please paste inside the following box using the keyboard (<strong>Ctrl/Cmd+V</strong>) and hit OK","securityMsg":"Because of your browser security settings, the editor is not able to access your clipboard data directly. You are required to paste it again in this window.","title":"Paste"},"colorbutton":{"auto":"Automatic","bgColorTitle":"Background Colour","colors":{"000":"Black","800000":"Maroon","8B4513":"Saddle Brown","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Indigo","696969":"Dark Gray","B22222":"Fire Brick","A52A2A":"Brown","DAA520":"Golden Rod","006400":"Dark Green","40E0D0":"Turquoise","0000CD":"Medium Blue","800080":"Purple","808080":"Gray","F00":"Red","FF8C00":"Dark Orange","FFD700":"Gold","008000":"Green","0FF":"Cyan","00F":"Blue","EE82EE":"Violet","A9A9A9":"Dim Gray","FFA07A":"Light Salmon","FFA500":"Orange","FFFF00":"Yellow","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"White"},"more":"More Colours...","panelTitle":"Colors","textColorTitle":"Text Colour"},"colordialog":{"clear":"Clear","highlight":"Highlight","options":"Color Options","selected":"Selected Color","title":"Select color"},"templates":{"button":"Templates","emptyListMsg":"(No templates defined)","insertOption":"Replace actual contents","options":"Template Options","selectPromptMsg":"Please select the template to open in the editor","title":"Content Templates"},"contextmenu":{"options":"Context Menu Options"},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Advisory Title","cssClassInputLabel":"Stylesheet Classes","edit":"Edit Div","inlineStyleInputLabel":"Inline Style","langDirLTRLabel":"Left to Right (LTR)","langDirLabel":"Language Direction","langDirRTLLabel":"Right to Left (RTL)","languageCodeInputLabel":" Language Code","remove":"Remove Div","styleSelectLabel":"Style","title":"Create Div Container","toolbar":"Create Div Container"},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"elementspath":{"eleLabel":"Elements path","eleTitle":"%1 element"},"find":{"find":"Find","findOptions":"Find Options","findWhat":"Find what:","matchCase":"Match case","matchCyclic":"Match cyclic","matchWord":"Match whole word","notFoundMsg":"The specified text was not found.","replace":"Replace","replaceAll":"Replace All","replaceSuccessMsg":"%1 occurrence(s) replaced.","replaceWith":"Replace with:","title":"Find and Replace"},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Hidden Field","iframe":"IFrame","unknown":"Unknown Object"},"flash":{"access":"Script Access","accessAlways":"Always","accessNever":"Never","accessSameDomain":"Same domain","alignAbsBottom":"Abs Bottom","alignAbsMiddle":"Abs Middle","alignBaseline":"Baseline","alignTextTop":"Text Top","bgcolor":"Background colour","chkFull":"Allow Fullscreen","chkLoop":"Loop","chkMenu":"Enable Flash Menu","chkPlay":"Auto Play","flashvars":"Variables for Flash","hSpace":"HSpace","properties":"Flash 
Properties","propertiesTab":"Properties","quality":"Quality","qualityAutoHigh":"Auto High","qualityAutoLow":"Auto Low","qualityBest":"Best","qualityHigh":"High","qualityLow":"Low","qualityMedium":"Medium","scale":"Scale","scaleAll":"Show all","scaleFit":"Exact Fit","scaleNoBorder":"No Border","title":"Flash Properties","vSpace":"VSpace","validateHSpace":"HSpace must be a number.","validateSrc":"URL must not be empty.","validateVSpace":"VSpace must be a number.","windowMode":"Window mode","windowModeOpaque":"Opaque","windowModeTransparent":"Transparent","windowModeWindow":"Window"},"font":{"fontSize":{"label":"Size","voiceLabel":"Font Size","panelTitle":"Font Size"},"label":"Font","panelTitle":"Font Name","voiceLabel":"Font"},"forms":{"button":{"title":"Button Properties","text":"Text (Value)","type":"Type","typeBtn":"Button","typeSbm":"Submit","typeRst":"Reset"},"checkboxAndRadio":{"checkboxTitle":"Checkbox Properties","radioTitle":"Radio Button Properties","value":"Value","selected":"Selected"},"form":{"title":"Form Properties","menu":"Form Properties","action":"Action","method":"Method","encoding":"Encoding"},"hidden":{"title":"Hidden Field Properties","name":"Name","value":"Value"},"select":{"title":"Selection Field Properties","selectInfo":"Select Info","opAvail":"Available Options","value":"Value","size":"Size","lines":"lines","chkMulti":"Allow multiple selections","opText":"Text","opValue":"Value","btnAdd":"Add","btnModify":"Modify","btnUp":"Up","btnDown":"Down","btnSetValue":"Set as selected value","btnDelete":"Delete"},"textarea":{"title":"Textarea Properties","cols":"Columns","rows":"Rows"},"textfield":{"title":"Text Field Properties","name":"Name","value":"Value","charWidth":"Character Width","maxChars":"Maximum Characters","type":"Type","typeText":"Text","typePass":"Password","typeEmail":"Email","typeSearch":"Search","typeTel":"Telephone Number","typeUrl":"URL"}},"format":{"label":"Format","panelTitle":"Paragraph Format","tag_address":"Address","tag_div":"Normal (DIV)","tag_h1":"Heading 1","tag_h2":"Heading 2","tag_h3":"Heading 3","tag_h4":"Heading 4","tag_h5":"Heading 5","tag_h6":"Heading 6","tag_p":"Normal","tag_pre":"Formatted"},"horizontalrule":{"toolbar":"Insert Horizontal Line"},"iframe":{"border":"Show frame border","noUrl":"Please type the iframe URL","scrolling":"Enable scrollbars","title":"IFrame Properties","toolbar":"IFrame"},"image":{"alertUrl":"Please type the image URL","alt":"Alternative Text","border":"Border","btnUpload":"Send it to the Server","button2Img":"Do you want to transform the selected image button on a simple image?","hSpace":"HSpace","img2Button":"Do you want to transform the selected image on a image button?","infoTab":"Image Info","linkTab":"Link","lockRatio":"Lock Ratio","menu":"Image Properties","resetSize":"Reset Size","title":"Image Properties","titleButton":"Image Button Properties","upload":"Upload","urlMissing":"Image source URL is missing.","vSpace":"VSpace","validateBorder":"Border must be a whole number.","validateHSpace":"HSpace must be a whole number.","validateVSpace":"VSpace must be a whole number."},"indent":{"indent":"Increase Indent","outdent":"Decrease Indent"},"smiley":{"options":"Smiley Options","title":"Insert a Smiley","toolbar":"Smiley"},"justify":{"block":"Justify","center":"Centre","left":"Align Left","right":"Align Right"},"link":{"acccessKey":"Access Key","advanced":"Advanced","advisoryContentType":"Advisory Content Type","advisoryTitle":"Advisory Title","anchor":{"toolbar":"Anchor","menu":"Edit Anchor","title":"Anchor 
Properties","name":"Anchor Name","errorName":"Please type the anchor name","remove":"Remove Anchor"},"anchorId":"By Element Id","anchorName":"By Anchor Name","charset":"Linked Resource Charset","cssClasses":"Stylesheet Classes","emailAddress":"E-Mail Address","emailBody":"Message Body","emailSubject":"Message Subject","id":"Id","info":"Link Info","langCode":"Language Code","langDir":"Language Direction","langDirLTR":"Left to Right (LTR)","langDirRTL":"Right to Left (RTL)","menu":"Edit Link","name":"Name","noAnchors":"(No anchors available in the document)","noEmail":"Please type the e-mail address","noUrl":"Please type the link URL","other":"<other>","popupDependent":"Dependent (Netscape)","popupFeatures":"Popup Window Features","popupFullScreen":"Full Screen (IE)","popupLeft":"Left Position","popupLocationBar":"Location Bar","popupMenuBar":"Menu Bar","popupResizable":"Resizable","popupScrollBars":"Scroll Bars","popupStatusBar":"Status Bar","popupToolbar":"Toolbar","popupTop":"Top Position","rel":"Relationship","selectAnchor":"Select an Anchor","styles":"Style","tabIndex":"Tab Index","target":"Target","targetFrame":"<frame>","targetFrameName":"Target Frame Name","targetPopup":"<popup window>","targetPopupName":"Popup Window Name","title":"Link","toAnchor":"Link to anchor in the text","toEmail":"E-mail","toUrl":"URL","toolbar":"Link","type":"Link Type","unlink":"Unlink","upload":"Upload"},"list":{"bulletedlist":"Insert/Remove Bulleted List","numberedlist":"Insert/Remove Numbered List"},"liststyle":{"armenian":"Armenian numbering","bulletedTitle":"Bulleted List Properties","circle":"Circle","decimal":"Decimal (1, 2, 3, etc.)","decimalLeadingZero":"Decimal leading zero (01, 02, 03, etc.)","disc":"Disc","georgian":"Georgian numbering (an, ban, gan, etc.)","lowerAlpha":"Lower Alpha (a, b, c, d, e, etc.)","lowerGreek":"Lower Greek (alpha, beta, gamma, etc.)","lowerRoman":"Lower Roman (i, ii, iii, iv, v, etc.)","none":"None","notset":"<not set>","numberedTitle":"Numbered List Properties","square":"Square","start":"Start","type":"Type","upperAlpha":"Upper Alpha (A, B, C, D, E, etc.)","upperRoman":"Upper Roman (I, II, III, IV, V, etc.)","validateStartNumber":"List start number must be a whole number."},"magicline":{"title":"Insert paragraph here"},"maximize":{"maximize":"Maximize","minimize":"Minimize"},"newpage":{"toolbar":"New Page"},"pagebreak":{"alt":"Page Break","toolbar":"Insert Page Break for Printing"},"pastetext":{"button":"Paste as plain text","title":"Paste as Plain Text"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"Paste from Word","toolbar":"Paste from Word"},"preview":{"preview":"Preview"},"print":{"toolbar":"Print"},"removeformat":{"toolbar":"Remove Format"},"save":{"toolbar":"Save"},"selectall":{"toolbar":"Select All"},"showblocks":{"toolbar":"Show Blocks"},"sourcearea":{"toolbar":"Source"},"specialchar":{"options":"Special Character Options","title":"Select Special Character","toolbar":"Insert Special Character"},"scayt":{"about":"About SCAYT","aboutTab":"About","addWord":"Add Word","allCaps":"Ignore All-Caps Words","dic_create":"Create","dic_delete":"Delete","dic_field_name":"Dictionary name","dic_info":"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. 
When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. If you already have a stored dictionary, please type its name and click the Restore button.","dic_rename":"Rename","dic_restore":"Restore","dictionariesTab":"Dictionaries","disable":"Disable SCAYT","emptyDic":"Dictionary name should not be empty.","enable":"Enable SCAYT","ignore":"Ignore","ignoreAll":"Ignore All","ignoreDomainNames":"Ignore Domain Names","langs":"Languages","languagesTab":"Languages","mixedCase":"Ignore Words with Mixed Case","mixedWithDigits":"Ignore Words with Numbers","moreSuggestions":"More suggestions","opera_title":"Not supported by Opera","options":"Options","optionsTab":"Options","title":"Spell Check As You Type","toggle":"Toggle SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"Styles","panelTitle":"Formatting Styles","panelTitle1":"Block Styles","panelTitle2":"Inline Styles","panelTitle3":"Object Styles"},"table":{"border":"Border size","caption":"Caption","cell":{"menu":"Cell","insertBefore":"Insert Cell Before","insertAfter":"Insert Cell After","deleteCell":"Delete Cells","merge":"Merge Cells","mergeRight":"Merge Right","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split Cell Vertically","title":"Cell Properties","cellType":"Cell Type","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Word Wrap","hAlign":"Horizontal Alignment","vAlign":"Vertical Alignment","alignBaseline":"Baseline","bgColor":"Background Color","borderColor":"Border Color","data":"Data","header":"Header","yes":"Yes","no":"No","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Choose"},"cellPad":"Cell padding","cellSpace":"Cell spacing","column":{"menu":"Column","insertBefore":"Insert Column Before","insertAfter":"Insert Column After","deleteColumn":"Delete Columns"},"columns":"Columns","deleteTable":"Delete Table","headers":"Headers","headersBoth":"Both","headersColumn":"First column","headersNone":"None","headersRow":"First Row","invalidBorder":"Border size must be a number.","invalidCellPadding":"Cell padding must be a number.","invalidCellSpacing":"Cell spacing must be a number.","invalidCols":"Number of columns must be a number greater than 0.","invalidHeight":"Table height must be a number.","invalidRows":"Number of rows must be a number greater than 0.","invalidWidth":"Table width must be a number.","menu":"Table Properties","row":{"menu":"Row","insertBefore":"Insert Row Before","insertAfter":"Insert Row After","deleteRow":"Delete Rows"},"rows":"Rows","summary":"Summary","title":"Table Properties","toolbar":"Table","widthPc":"percent","widthPx":"pixels","widthUnit":"width unit"},"undo":{"redo":"Redo","undo":"Undo"},"wsc":{"btnIgnore":"Ignore","btnIgnoreAll":"Ignore All","btnReplace":"Replace","btnReplaceAll":"Replace All","btnUndo":"Undo","changeTo":"Change to","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Spell checker not installed. 
Do you want to download it now?","manyChanges":"Spell check complete: %1 words changed","noChanges":"Spell check complete: No words changed","noMispell":"Spell check complete: No misspellings found","noSuggestions":"- No suggestions -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Not in dictionary","oneChange":"Spell check complete: One word changed","progress":"Spell check in progress...","title":"Spell Check","toolbar":"Check Spelling"}};
PypiClean
/OSG-Gratia-Viewer-0.1.tar.gz/OSG-Gratia-Viewer-0.1/src/gratia/web/__init__.py
from graphtool.base.xml_config import XmlConfig from gratia.database.query_handler import displayName from graphtool.web.security import DenyAll from Cheetah.Template import Template from xml.dom.minidom import parse import cherrypy, os, urllib, urllib2, re, sys # Helper functions class Gratia(XmlConfig): def __init__(self, *args, **kw): super(Gratia, self).__init__(*args, **kw) security_obj_name = self.metadata.get('security', False) if security_obj_name: self.security_obj = self.globals[security_obj_name] else: self.security_obj = DenyAll() self.metadata['template_dir'] = '$GRAPHTOOL_USER_ROOT/templates' self.template_dir = os.path.expandvars(self.metadata.get('template_dir', '.')) self.main = self.template('main.tmpl')(self.main) self.overview = self.template('overview.tmpl')(self.overview) self.vo_overview = self.template('vo_overview.tmpl')(self.vo_overview) self.vo_opp = self.template('vo_opp.tmpl')(self.vo_opp) self.vo_opp2 = self.template('vo_opp2.tmpl')(self.vo_opp2) self.vo_exitcode = self.template('vo_exitcode.tmpl')(self.vo_exitcode) self.site_owner = self.template('site.tmpl')(self.site_owner) self._cp_config ={} self.index = self.overview def template(self, name=None): template_file = os.path.join(self.template_dir, name) tclass = Template.compile(file=template_file) def template_decorator(func): def func_wrapper(*args, **kw): data = func(*args, **kw) if data.get('is_authenticated', True): base_url = self.metadata.get('base_url', '') else: base_url = self.metadata.get('base_url_noauth', '') if base_url == '/': base_url = "" addl = {'base_url': base_url} return str(tclass(namespaces=[data, addl])) func_wrapper.exposed = True return func_wrapper return template_decorator def generate_drilldown(self, option, url): def drilldown(pivot, group, base_url, filter_dict): filter_dict[option] = pivot filter_url = urllib.urlencode(filter_dict) return base_url + url + '?' 
+ filter_url return drilldown def generate_pg_map(self, func, kw, drilldown_url, drilldown_option): map = {} map['kind'] = 'pivot-group' results, metadata = func(**kw) assert metadata['kind'] == 'pivot-group' map['name'] = metadata['name'] column_names = str(metadata.get('column_names','')) column_units = str(metadata.get('column_units','')) names = [ i.strip() for i in column_names.split(',') ] units = [ i.strip() for i in column_units.split(',') ] map['column_names'] = names map['column_units'] = units map['pivot_name'] = metadata['pivot_name'] data = {} map['data'] = data map['drilldown'] = self.generate_drilldown(drilldown_option, drilldown_url) coords = metadata['grapher'].get_coords(metadata['query'], metadata, **metadata['given_kw']) for pivot, groups in results.items(): data_groups = {} data[pivot] = data_groups if pivot in coords: coord_groups = coords[pivot] for group, val in groups.items(): if group in coord_groups: coord = str(coord_groups[group]).replace('(', '').replace(')', '') data_groups[group] = (coord, val) return map def user_roles(self, data): """ Authenticate a user and get their roles """ data['is_site_owner'] = False data['is_vo_owner'] = False data['is_view_other_users'] = False if not data['is_authenticated']: data['auth_count'] = 0 return dn = data['dn'] data['vo_ownership'] = self.security_obj.list_roles("vo_ownership", dn) data['site_ownership'] = self.security_obj.list_roles("site_ownership", \ dn) data['user_ownership'] = self.security_obj.list_roles("users", dn) auth_count = 0 if len(data['vo_ownership']) > 0: data['is_vo_owner'] = True auth_count += 1 if len(data['user_ownership']) > 0: data['is_view_other_users'] = True auth_count += 1 if len(data['site_ownership']) > 0: data['is_site_owner'] = True auth_count += 1 data['is_super_user'] = False data['auth_count'] = auth_count def user_auth(self, data): dn = cherrypy.request.headers.get('SSL-CLIENT-S-DN',None) if dn: assert cherrypy.request.headers.get('SSL-CLIENT-VERIFY', \ 'Failure') == 'SUCCESS' data['is_authenticated'] = True data['dn'] = dn data['name'] = displayName(dn) else: data['is_authenticated'] = False def assign_blank(self, dict, *args): for arg in args: if arg not in dict: dict[arg] = '' def copy_if_present(self, to_dict, from_dict, *args): for arg in args: if arg in from_dict and from_dict[arg] != '': to_dict[arg] = from_dict[arg] def refine(self, data, filter_dict, facility=True, vo=True, dn=True): relTime = data.get('relativetime', False) data['refine_vo'] = vo data['refine_facility'] = facility data['refine_dn'] = dn if relTime: if relTime == 'absolute': data['relTime'] = 'absolute' starttime = data.get('starttime', None) if starttime != None and starttime.strip() != '': filter_dict['starttime'] = starttime endtime = data.get('endtime', None) if endtime != None and endtime.strip() != '': filter_dict['endtime'] = endtime else: data['relTime'] = relTime interval = int(relTime) filter_dict['starttime'] = 'time.time()-%i' % interval filter_dict['endtime'] = 'time.time()' if interval < 4*86400: filter_dict['span'] = 3600 elif interval < 30*86400: filter_dict['span'] = 86400 else: filter_dict['span'] = 86400*7 else: data['relTime'] = 'absolute' self.copy_if_present(filter_dict, data, 'facility', 'vo', \ 'exclude-facility', 'exclude-vo', 'user', 'user', 'exclude-dn') data['query_kw'] = dict(filter_dict) data['filter_url'] = urllib.urlencode(filter_dict) self.assign_blank(filter_dict, 'facility', 'vo', 'exclude-vo', \ 'exclude-facility', 'exclude-dn', 'user') data['filter_dict'] = filter_dict if 
data['filter_url'] != '': data['filter_url'] = '?' + data['filter_url'] data['refine'] = os.path.join(self.template_dir, 'refine.tmpl') data['refine_error'] = None def image_map(self, data, obj_name, func_name, drilldown_url, drilldown_option): maps = data.get('maps', []) data['maps'] = maps map = self.generate_pg_map(getattr(self.globals[obj_name], func_name), data['query_kw'], \ drilldown_url, drilldown_option) maps.append(map) #if 'image_maps' not in data: data['image_maps'] = os.path.join(self.template_dir, 'image_map.tmpl') def main(self, *args, **kw): data = dict(kw) data['given_kw'] = dict(kw) self.focus(kw, data, 'main', 'facility', ['facility', 'vo', 'both']) filter_dict = {} # Do user auth: self.user_auth(data) self.user_roles(data) # Handle the refine variables self.refine(data, filter_dict, dn=False) # Generate image maps: if data['focus']['value'] == 'facility' or data['focus']['value'] == 'both': self.image_map(data, 'GratiaBarQueries', 'facility_transfer_rate', 'main', 'facility') self.image_map(data, 'GratiaBarQueries', 'facility_quality', 'main', 'facility') self.image_map(data, 'GratiaBarQueries', 'facility_transfer_volume', 'main', 'facility') else: self.image_map(data, 'GratiaBarQueries', 'vo_transfer_rate', 'main', 'vo') self.image_map(data, 'GratiaBarQueries', 'vo_quality', 'main', 'vo') self.image_map(data, 'GratiaBarQueries', 'vo_transfer_volume', 'main', 'vo') if data['is_authenticated']: data['title'] = "OSG Storage Main for %s" % data['name'] else: data['title'] = "OSG Storage Main" return data def focus(self, kw, data, page, default, values): focus_kw = dict(kw) focus = data.get('focus', default) def change_focus(view, base_url): base_url = base_url + '/' + page if view==data['focus']: return None focus_kw['focus'] = view query = urllib.urlencode(focus_kw) if len(query) > 0: query = '?' 
+ query return base_url + query data['change_focus'] = change_focus focus = {'value': focus, 'change': change_focus} focus['tmpl'] = os.path.join(self.template_dir, 'focus.tmpl') focus['values'] = values data['focus'] = focus def site_owner(self, *args, **kw): data = dict(kw) data['given_kw'] = kw filter_dict = {} data['facility'] = data.get('facility', None) self.focus(kw, data, 'site_owner', 'user', ['user', 'vo', 'both']) #User auth self.user_auth(data) self.user_roles(data) #Handle refine self.refine(data, filter_dict, facility=False) #Generate image maps if data['focus']['value'] == 'user' or data['focus']['value'] == 'both': #self.image_map(data, 'GratiaSiteBarQueries', 'site_user_job_quality', 'site_owner', 'user') #self.image_map(data, 'GratiaSiteBarQueries', 'site_user_job_hours', 'site_owner', 'user') self.image_map(data, 'GratiaSiteBarQueries', 'site_user_transfer_quality', 'site_owner', 'user') self.image_map(data, 'GratiaSiteBarQueries', 'site_user_transfer_rate', 'site_owner', 'user') if data['focus']['value'] == 'vo' or data['focus']['value'] == 'both': #self.image_map(data, 'GratiaSiteBarQueries', 'site_vo_job_quality', 'site_owner', 'user') #self.image_map(data, 'GratiaSiteBarQueries', 'site_vo_job_hours', 'site_owner', 'user') self.image_map(data, 'GratiaSiteBarQueries', 'site_vo_transfer_quality', 'site_owner', 'user') self.image_map(data, 'GratiaSiteBarQueries', 'site_vo_transfer_rate', 'site_owner', 'user') #Empty transfer list for now #transfers, metadata = self.globals['GratiaSiteBarQueries'].site_table(data['query_kw']) transfers = [] for transfer in transfers: transfer['name'] = displayName(transfer['name']) transfer['transfer_rate'] = to_mb(transfer['transfer_rate']) + ' MB/s' transfer['bytes_transferred'] = to_mb(transfer['bytes_transferred']) + ' MB' data['transfers'] = transfers # External data external = {} data['external'] = external external['GridScan'] = self.fetch_gridscan(data.get('facility')) external['GIP Validator'] = self.gip_validation(data['facility']) return data def fetch_gridscan(self, site): doc = urllib2.urlopen('http://scan.grid.iu.edu/cgi-bin/show_results?grid=1') in_row = False in_font = False link_re = re.compile('HREF="(.*?)"') link = "#" status = "Unknown" for line in doc.readlines(): if line.find(site) >= 0: in_row = True if not in_row: continue if line.find('HREF') >= 0: m = link_re.search(line) if m: link = m.groups()[0] if line.startswith("<FONT"): in_font = True continue if in_font: status = line.strip() break return status, "http://scan.grid.iu.edu" + link def gip_validation(self, site): doc = urllib2.urlopen('http://gip-validate.grid.iu.edu/production') row_re = re.compile("<td valign='middle'>%s</td>" % site) info_re = re.compile("<td height='30' bgcolor='(.*?)'><a href='(.*?)'>") in_row = False result = "Unknown" link = "#" for line in doc.readlines(): if row_re.search(line): in_row = True continue if in_row: m = info_re.search(line) if m: color, link = m.groups() if color == 'green': result = "PASS" elif color == 'red': result = "FAIL" elif color == "black": result = "Not Reporting" else: result = "Unknown" break return result, "http://gip-validate.grid.iu.edu/production/" + link def vo_owner(self, *args, **kw): return "We're sorry, but the VO owner page has not been written." vo_owner.exposed = True def user(self, *args, **kw): return "We're sorry, but the user details page has not been written." 
user.exposed = True def overview(self, *args, **kw): data = dict(kw) self.user_auth(data) data['title'] = "OSG overview page" data['static_url'] = self.metadata.get('static_url', '/store/gratia') return data def get_variable_values(self, url): retval = [] try: xmldoc = urllib2.urlopen(url) except (KeyboardInterrupt, SystemExit): raise except Exception, e: print >> sys.stderr, "Exception occurred while getting variable values: %s" % str(e) return retval dom = parse(xmldoc) for pivot in dom.getElementsByTagName('pivot'): pivot_str = pivot.getAttribute('name') if len(pivot_str) > 0: retval.append(pivot_str) return retval def get_vo_list(self, vos_url, registered_vos_url, keep_vos): vos = self.get_variable_values(vos_url) info = urllib2.urlopen(registered_vos_url) reg_vos = [] for line in info.readlines(): line = line.strip() if len(line) == 0: continue reg_vos.append(line.split('<')[0].lower()) retval = [] for vo in vos: if vo in keep_vos or vo.lower() in reg_vos: retval.append(vo) for vo in keep_vos: if vo not in retval: retval.append(vo) return retval def vo_overview(self, *args, **kw): data = dict(kw) self.user_auth(data) vos_url = self.metadata.get('vos_url', '/gratia/xml/vo_corrected_table') registered_vos_url = self.metadata.get('registered_vos_url', \ 'http://www.grid.iu.edu/osg-includes/vo_txt.php') keep_vos = [i.strip() for i in self.metadata.get('keep_vos', \ '').split(',') if len(i.strip()) > 0] if kw.get('filter', 'true').lower() == 'false': vos = self.get_variable_values(vos_url) else: vos = self.get_vo_list(vos_url, registered_vos_url, keep_vos) data['vos'] = vos data['current_vo'] = kw.get('vo', None) data['static_url'] = self.metadata.get('static_url', '/store/gratia') return data def vo_opp(self, *args, **kw): data = dict(kw) self.user_auth(data) vos_url = self.metadata.get('vos_url', '/gratia/xml/vo_corrected_table') registered_vos_url = self.metadata.get('registered_vos_url', \ 'http://www.grid.iu.edu/osg-includes/vo_txt.php') keep_vos = [i.strip() for i in self.metadata.get('keep_vos', \ '').split(',') if len(i.strip()) > 0] if kw.get('filter', 'true').lower() == 'false': vos = self.get_variable_values(vos_url) else: vos = self.get_vo_list(vos_url, registered_vos_url, keep_vos) data['vos'] = vos data['current_vo'] = kw.get('vo', None) data['static_url'] = self.metadata.get('static_url', '/store/gratia') return data def vo_opp2(self, *args, **kw): data = dict(kw) self.user_auth(data) vos_url = self.metadata.get('vos_url', '/gratia/xml/vo_corrected_table') registered_vos_url = self.metadata.get('registered_vos_url', \ 'http://www.grid.iu.edu/osg-includes/vo_txt.php') keep_vos = [i.strip() for i in self.metadata.get('keep_vos', \ '').split(',') if len(i.strip()) > 0] if kw.get('filter', 'true').lower() == 'false': vos = self.get_variable_values(vos_url) else: vos = self.get_vo_list(vos_url, registered_vos_url, keep_vos) data['vos'] = vos data['current_vo'] = kw.get('vo', None) data['static_url'] = self.metadata.get('static_url', '/store/gratia') return data def vo_exitcode(self, *args, **kw): data = dict(kw) self.user_auth(data) vos_url = self.metadata.get('vos_url', '/gratia/xml/vo_corrected_table') registered_vos_url = self.metadata.get('registered_vos_url', \ 'http://www.grid.iu.edu/osg-includes/vo_txt.php') keep_vos = [i.strip() for i in self.metadata.get('keep_vos', \ '').split(',') if len(i.strip()) > 0] if kw.get('filter', 'true').lower() == 'false': vos = self.get_variable_values(vos_url) else: vos = self.get_vo_list(vos_url, registered_vos_url, keep_vos) data['vos'] 
= vos data['current_vo'] = kw.get('vo', None) data['static_url'] = self.metadata.get('static_url', '/store/gratia') return data
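# --- Illustrative sketch (not part of the original module) ---
# generate_drilldown() above re-encodes the current filter state with one
# pivot value swapped in; this shows the URL shape it produces. All values
# are made up, and the module-level urllib import (Python 2) is reused.
if __name__ == '__main__':
    filter_dict = {'starttime': 'time.time()-86400',
                   'endtime': 'time.time()'}
    filter_dict['facility'] = 'Nebraska'  # the clicked pivot (hypothetical)
    print '/gratia' + '/site_owner' + '?' + urllib.urlencode(filter_dict)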
PypiClean
/Flask-AdminLTE2-1.0.0.tar.gz/Flask-AdminLTE2-1.0.0/flask_adminlte2/static/plugins/morris/morris.js
(function() { var $, Morris, minutesSpecHelper, secondsSpecHelper, __slice = [].slice, __bind = function(fn, me){ return function(){ return fn.apply(me, arguments); }; }, __hasProp = {}.hasOwnProperty, __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }, __indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; }; Morris = window.Morris = {}; $ = jQuery; Morris.EventEmitter = (function() { function EventEmitter() {} EventEmitter.prototype.on = function(name, handler) { if (this.handlers == null) { this.handlers = {}; } if (this.handlers[name] == null) { this.handlers[name] = []; } this.handlers[name].push(handler); return this; }; EventEmitter.prototype.fire = function() { var args, handler, name, _i, _len, _ref, _results; name = arguments[0], args = 2 <= arguments.length ? __slice.call(arguments, 1) : []; if ((this.handlers != null) && (this.handlers[name] != null)) { _ref = this.handlers[name]; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { handler = _ref[_i]; _results.push(handler.apply(null, args)); } return _results; } }; return EventEmitter; })(); Morris.commas = function(num) { var absnum, intnum, ret, strabsnum; if (num != null) { ret = num < 0 ? "-" : ""; absnum = Math.abs(num); intnum = Math.floor(absnum).toFixed(0); ret += intnum.replace(/(?=(?:\d{3})+$)(?!^)/g, ','); strabsnum = absnum.toString(); if (strabsnum.length > intnum.length) { ret += strabsnum.slice(intnum.length); } return ret; } else { return '-'; } }; Morris.pad2 = function(number) { return (number < 10 ? 
'0' : '') + number; }; Morris.Grid = (function(_super) { __extends(Grid, _super); function Grid(options) { this.resizeHandler = __bind(this.resizeHandler, this); var _this = this; if (typeof options.element === 'string') { this.el = $(document.getElementById(options.element)); } else { this.el = $(options.element); } if ((this.el == null) || this.el.length === 0) { throw new Error("Graph container element not found"); } if (this.el.css('position') === 'static') { this.el.css('position', 'relative'); } this.options = $.extend({}, this.gridDefaults, this.defaults || {}, options); if (typeof this.options.units === 'string') { this.options.postUnits = options.units; } this.raphael = new Raphael(this.el[0]); this.elementWidth = null; this.elementHeight = null; this.dirty = false; this.selectFrom = null; if (this.init) { this.init(); } this.setData(this.options.data); this.el.bind('mousemove', function(evt) { var left, offset, right, width, x; offset = _this.el.offset(); x = evt.pageX - offset.left; if (_this.selectFrom) { left = _this.data[_this.hitTest(Math.min(x, _this.selectFrom))]._x; right = _this.data[_this.hitTest(Math.max(x, _this.selectFrom))]._x; width = right - left; return _this.selectionRect.attr({ x: left, width: width }); } else { return _this.fire('hovermove', x, evt.pageY - offset.top); } }); this.el.bind('mouseleave', function(evt) { if (_this.selectFrom) { _this.selectionRect.hide(); _this.selectFrom = null; } return _this.fire('hoverout'); }); this.el.bind('touchstart touchmove touchend', function(evt) { var offset, touch; touch = evt.originalEvent.touches[0] || evt.originalEvent.changedTouches[0]; offset = _this.el.offset(); return _this.fire('hovermove', touch.pageX - offset.left, touch.pageY - offset.top); }); this.el.bind('click', function(evt) { var offset; offset = _this.el.offset(); return _this.fire('gridclick', evt.pageX - offset.left, evt.pageY - offset.top); }); if (this.options.rangeSelect) { this.selectionRect = this.raphael.rect(0, 0, 0, this.el.innerHeight()).attr({ fill: this.options.rangeSelectColor, stroke: false }).toBack().hide(); this.el.bind('mousedown', function(evt) { var offset; offset = _this.el.offset(); return _this.startRange(evt.pageX - offset.left); }); this.el.bind('mouseup', function(evt) { var offset; offset = _this.el.offset(); _this.endRange(evt.pageX - offset.left); return _this.fire('hovermove', evt.pageX - offset.left, evt.pageY - offset.top); }); } if (this.options.resize) { $(window).bind('resize', function(evt) { if (_this.timeoutId != null) { window.clearTimeout(_this.timeoutId); } return _this.timeoutId = window.setTimeout(_this.resizeHandler, 100); }); } this.el.css('-webkit-tap-highlight-color', 'rgba(0,0,0,0)'); if (this.postInit) { this.postInit(); } } Grid.prototype.gridDefaults = { dateFormat: null, axes: true, grid: true, gridLineColor: '#aaa', gridStrokeWidth: 0.5, gridTextColor: '#888', gridTextSize: 12, gridTextFamily: 'sans-serif', gridTextWeight: 'normal', hideHover: false, yLabelFormat: null, xLabelAngle: 0, numLines: 5, padding: 25, parseTime: true, postUnits: '', preUnits: '', ymax: 'auto', ymin: 'auto 0', goals: [], goalStrokeWidth: 1.0, goalLineColors: ['#666633', '#999966', '#cc6666', '#663333'], events: [], eventStrokeWidth: 1.0, eventLineColors: ['#005a04', '#ccffbb', '#3a5f0b', '#005502'], rangeSelect: null, rangeSelectColor: '#eef', resize: false }; Grid.prototype.setData = function(data, redraw) { var e, idx, index, maxGoal, minGoal, ret, row, step, total, y, ykey, ymax, ymin, yval, _ref; if (redraw == null) 
{ redraw = true; } this.options.data = data; if ((data == null) || data.length === 0) { this.data = []; this.raphael.clear(); if (this.hover != null) { this.hover.hide(); } return; } ymax = this.cumulative ? 0 : null; ymin = this.cumulative ? 0 : null; if (this.options.goals.length > 0) { minGoal = Math.min.apply(Math, this.options.goals); maxGoal = Math.max.apply(Math, this.options.goals); ymin = ymin != null ? Math.min(ymin, minGoal) : minGoal; ymax = ymax != null ? Math.max(ymax, maxGoal) : maxGoal; } this.data = (function() { var _i, _len, _results; _results = []; for (index = _i = 0, _len = data.length; _i < _len; index = ++_i) { row = data[index]; ret = { src: row }; ret.label = row[this.options.xkey]; if (this.options.parseTime) { ret.x = Morris.parseDate(ret.label); if (this.options.dateFormat) { ret.label = this.options.dateFormat(ret.x); } else if (typeof ret.label === 'number') { ret.label = new Date(ret.label).toString(); } } else { ret.x = index; if (this.options.xLabelFormat) { ret.label = this.options.xLabelFormat(ret); } } total = 0; ret.y = (function() { var _j, _len1, _ref, _results1; _ref = this.options.ykeys; _results1 = []; for (idx = _j = 0, _len1 = _ref.length; _j < _len1; idx = ++_j) { ykey = _ref[idx]; yval = row[ykey]; if (typeof yval === 'string') { yval = parseFloat(yval); } if ((yval != null) && typeof yval !== 'number') { yval = null; } if (yval != null) { if (this.cumulative) { total += yval; } else { if (ymax != null) { ymax = Math.max(yval, ymax); ymin = Math.min(yval, ymin); } else { ymax = ymin = yval; } } } if (this.cumulative && (total != null)) { ymax = Math.max(total, ymax); ymin = Math.min(total, ymin); } _results1.push(yval); } return _results1; }).call(this); _results.push(ret); } return _results; }).call(this); if (this.options.parseTime) { this.data = this.data.sort(function(a, b) { return (a.x > b.x) - (b.x > a.x); }); } this.xmin = this.data[0].x; this.xmax = this.data[this.data.length - 1].x; this.events = []; if (this.options.events.length > 0) { if (this.options.parseTime) { this.events = (function() { var _i, _len, _ref, _results; _ref = this.options.events; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { e = _ref[_i]; _results.push(Morris.parseDate(e)); } return _results; }).call(this); } else { this.events = this.options.events; } this.xmax = Math.max(this.xmax, Math.max.apply(Math, this.events)); this.xmin = Math.min(this.xmin, Math.min.apply(Math, this.events)); } if (this.xmin === this.xmax) { this.xmin -= 1; this.xmax += 1; } this.ymin = this.yboundary('min', ymin); this.ymax = this.yboundary('max', ymax); if (this.ymin === this.ymax) { if (ymin) { this.ymin -= 1; } this.ymax += 1; } if (((_ref = this.options.axes) === true || _ref === 'both' || _ref === 'y') || this.options.grid === true) { if (this.options.ymax === this.gridDefaults.ymax && this.options.ymin === this.gridDefaults.ymin) { this.grid = this.autoGridLines(this.ymin, this.ymax, this.options.numLines); this.ymin = Math.min(this.ymin, this.grid[0]); this.ymax = Math.max(this.ymax, this.grid[this.grid.length - 1]); } else { step = (this.ymax - this.ymin) / (this.options.numLines - 1); this.grid = (function() { var _i, _ref1, _ref2, _results; _results = []; for (y = _i = _ref1 = this.ymin, _ref2 = this.ymax; step > 0 ? 
_i <= _ref2 : _i >= _ref2; y = _i += step) { _results.push(y); } return _results; }).call(this); } } this.dirty = true; if (redraw) { return this.redraw(); } }; Grid.prototype.yboundary = function(boundaryType, currentValue) { var boundaryOption, suggestedValue; boundaryOption = this.options["y" + boundaryType]; if (typeof boundaryOption === 'string') { if (boundaryOption.slice(0, 4) === 'auto') { if (boundaryOption.length > 5) { suggestedValue = parseInt(boundaryOption.slice(5), 10); if (currentValue == null) { return suggestedValue; } return Math[boundaryType](currentValue, suggestedValue); } else { if (currentValue != null) { return currentValue; } else { return 0; } } } else { return parseInt(boundaryOption, 10); } } else { return boundaryOption; } }; Grid.prototype.autoGridLines = function(ymin, ymax, nlines) { var gmax, gmin, grid, smag, span, step, unit, y, ymag; span = ymax - ymin; ymag = Math.floor(Math.log(span) / Math.log(10)); unit = Math.pow(10, ymag); gmin = Math.floor(ymin / unit) * unit; gmax = Math.ceil(ymax / unit) * unit; step = (gmax - gmin) / (nlines - 1); if (unit === 1 && step > 1 && Math.ceil(step) !== step) { step = Math.ceil(step); gmax = gmin + step * (nlines - 1); } if (gmin < 0 && gmax > 0) { gmin = Math.floor(ymin / step) * step; gmax = Math.ceil(ymax / step) * step; } if (step < 1) { smag = Math.floor(Math.log(step) / Math.log(10)); grid = (function() { var _i, _results; _results = []; for (y = _i = gmin; step > 0 ? _i <= gmax : _i >= gmax; y = _i += step) { _results.push(parseFloat(y.toFixed(1 - smag))); } return _results; })(); } else { grid = (function() { var _i, _results; _results = []; for (y = _i = gmin; step > 0 ? _i <= gmax : _i >= gmax; y = _i += step) { _results.push(y); } return _results; })(); } return grid; }; Grid.prototype._calc = function() { var bottomOffsets, gridLine, h, i, w, yLabelWidths, _ref, _ref1; w = this.el.width(); h = this.el.height(); if (this.elementWidth !== w || this.elementHeight !== h || this.dirty) { this.elementWidth = w; this.elementHeight = h; this.dirty = false; this.left = this.options.padding; this.right = this.elementWidth - this.options.padding; this.top = this.options.padding; this.bottom = this.elementHeight - this.options.padding; if ((_ref = this.options.axes) === true || _ref === 'both' || _ref === 'y') { yLabelWidths = (function() { var _i, _len, _ref1, _results; _ref1 = this.grid; _results = []; for (_i = 0, _len = _ref1.length; _i < _len; _i++) { gridLine = _ref1[_i]; _results.push(this.measureText(this.yAxisFormat(gridLine)).width); } return _results; }).call(this); this.left += Math.max.apply(Math, yLabelWidths); } if ((_ref1 = this.options.axes) === true || _ref1 === 'both' || _ref1 === 'x') { bottomOffsets = (function() { var _i, _ref2, _results; _results = []; for (i = _i = 0, _ref2 = this.data.length; 0 <= _ref2 ? _i < _ref2 : _i > _ref2; i = 0 <= _ref2 ? 
++_i : --_i) { _results.push(this.measureText(this.data[i].text, -this.options.xLabelAngle).height); } return _results; }).call(this); this.bottom -= Math.max.apply(Math, bottomOffsets); } this.width = Math.max(1, this.right - this.left); this.height = Math.max(1, this.bottom - this.top); this.dx = this.width / (this.xmax - this.xmin); this.dy = this.height / (this.ymax - this.ymin); if (this.calc) { return this.calc(); } } }; Grid.prototype.transY = function(y) { return this.bottom - (y - this.ymin) * this.dy; }; Grid.prototype.transX = function(x) { if (this.data.length === 1) { return (this.left + this.right) / 2; } else { return this.left + (x - this.xmin) * this.dx; } }; Grid.prototype.redraw = function() { this.raphael.clear(); this._calc(); this.drawGrid(); this.drawGoals(); this.drawEvents(); if (this.draw) { return this.draw(); } }; Grid.prototype.measureText = function(text, angle) { var ret, tt; if (angle == null) { angle = 0; } tt = this.raphael.text(100, 100, text).attr('font-size', this.options.gridTextSize).attr('font-family', this.options.gridTextFamily).attr('font-weight', this.options.gridTextWeight).rotate(angle); ret = tt.getBBox(); tt.remove(); return ret; }; Grid.prototype.yAxisFormat = function(label) { return this.yLabelFormat(label); }; Grid.prototype.yLabelFormat = function(label) { if (typeof this.options.yLabelFormat === 'function') { return this.options.yLabelFormat(label); } else { return "" + this.options.preUnits + (Morris.commas(label)) + this.options.postUnits; } }; Grid.prototype.drawGrid = function() { var lineY, y, _i, _len, _ref, _ref1, _ref2, _results; if (this.options.grid === false && ((_ref = this.options.axes) !== true && _ref !== 'both' && _ref !== 'y')) { return; } _ref1 = this.grid; _results = []; for (_i = 0, _len = _ref1.length; _i < _len; _i++) { lineY = _ref1[_i]; y = this.transY(lineY); if ((_ref2 = this.options.axes) === true || _ref2 === 'both' || _ref2 === 'y') { this.drawYAxisLabel(this.left - this.options.padding / 2, y, this.yAxisFormat(lineY)); } if (this.options.grid) { _results.push(this.drawGridLine("M" + this.left + "," + y + "H" + (this.left + this.width))); } else { _results.push(void 0); } } return _results; }; Grid.prototype.drawGoals = function() { var color, goal, i, _i, _len, _ref, _results; _ref = this.options.goals; _results = []; for (i = _i = 0, _len = _ref.length; _i < _len; i = ++_i) { goal = _ref[i]; color = this.options.goalLineColors[i % this.options.goalLineColors.length]; _results.push(this.drawGoal(goal, color)); } return _results; }; Grid.prototype.drawEvents = function() { var color, event, i, _i, _len, _ref, _results; _ref = this.events; _results = []; for (i = _i = 0, _len = _ref.length; _i < _len; i = ++_i) { event = _ref[i]; color = this.options.eventLineColors[i % this.options.eventLineColors.length]; _results.push(this.drawEvent(event, color)); } return _results; }; Grid.prototype.drawGoal = function(goal, color) { return this.raphael.path("M" + this.left + "," + (this.transY(goal)) + "H" + this.right).attr('stroke', color).attr('stroke-width', this.options.goalStrokeWidth); }; Grid.prototype.drawEvent = function(event, color) { return this.raphael.path("M" + (this.transX(event)) + "," + this.bottom + "V" + this.top).attr('stroke', color).attr('stroke-width', this.options.eventStrokeWidth); }; Grid.prototype.drawYAxisLabel = function(xPos, yPos, text) { return this.raphael.text(xPos, yPos, text).attr('font-size', this.options.gridTextSize).attr('font-family', 
this.options.gridTextFamily).attr('font-weight', this.options.gridTextWeight).attr('fill', this.options.gridTextColor).attr('text-anchor', 'end'); }; Grid.prototype.drawGridLine = function(path) { return this.raphael.path(path).attr('stroke', this.options.gridLineColor).attr('stroke-width', this.options.gridStrokeWidth); }; Grid.prototype.startRange = function(x) { this.hover.hide(); this.selectFrom = x; return this.selectionRect.attr({ x: x, width: 0 }).show(); }; Grid.prototype.endRange = function(x) { var end, start; if (this.selectFrom) { start = Math.min(this.selectFrom, x); end = Math.max(this.selectFrom, x); this.options.rangeSelect.call(this.el, { start: this.data[this.hitTest(start)].x, end: this.data[this.hitTest(end)].x }); return this.selectFrom = null; } }; Grid.prototype.resizeHandler = function() { this.timeoutId = null; this.raphael.setSize(this.el.width(), this.el.height()); return this.redraw(); }; return Grid; })(Morris.EventEmitter); Morris.parseDate = function(date) { var isecs, m, msecs, n, o, offsetmins, p, q, r, ret, secs; if (typeof date === 'number') { return date; } m = date.match(/^(\d+) Q(\d)$/); n = date.match(/^(\d+)-(\d+)$/); o = date.match(/^(\d+)-(\d+)-(\d+)$/); p = date.match(/^(\d+) W(\d+)$/); q = date.match(/^(\d+)-(\d+)-(\d+)[ T](\d+):(\d+)(Z|([+-])(\d\d):?(\d\d))?$/); r = date.match(/^(\d+)-(\d+)-(\d+)[ T](\d+):(\d+):(\d+(\.\d+)?)(Z|([+-])(\d\d):?(\d\d))?$/); if (m) { return new Date(parseInt(m[1], 10), parseInt(m[2], 10) * 3 - 1, 1).getTime(); } else if (n) { return new Date(parseInt(n[1], 10), parseInt(n[2], 10) - 1, 1).getTime(); } else if (o) { return new Date(parseInt(o[1], 10), parseInt(o[2], 10) - 1, parseInt(o[3], 10)).getTime(); } else if (p) { ret = new Date(parseInt(p[1], 10), 0, 1); if (ret.getDay() !== 4) { ret.setMonth(0, 1 + ((4 - ret.getDay()) + 7) % 7); } return ret.getTime() + parseInt(p[2], 10) * 604800000; } else if (q) { if (!q[6]) { return new Date(parseInt(q[1], 10), parseInt(q[2], 10) - 1, parseInt(q[3], 10), parseInt(q[4], 10), parseInt(q[5], 10)).getTime(); } else { offsetmins = 0; if (q[6] !== 'Z') { offsetmins = parseInt(q[8], 10) * 60 + parseInt(q[9], 10); if (q[7] === '+') { offsetmins = 0 - offsetmins; } } return Date.UTC(parseInt(q[1], 10), parseInt(q[2], 10) - 1, parseInt(q[3], 10), parseInt(q[4], 10), parseInt(q[5], 10) + offsetmins); } } else if (r) { secs = parseFloat(r[6]); isecs = Math.floor(secs); msecs = Math.round((secs - isecs) * 1000); if (!r[8]) { return new Date(parseInt(r[1], 10), parseInt(r[2], 10) - 1, parseInt(r[3], 10), parseInt(r[4], 10), parseInt(r[5], 10), isecs, msecs).getTime(); } else { offsetmins = 0; if (r[8] !== 'Z') { offsetmins = parseInt(r[10], 10) * 60 + parseInt(r[11], 10); if (r[9] === '+') { offsetmins = 0 - offsetmins; } } return Date.UTC(parseInt(r[1], 10), parseInt(r[2], 10) - 1, parseInt(r[3], 10), parseInt(r[4], 10), parseInt(r[5], 10) + offsetmins, isecs, msecs); } } else { return new Date(parseInt(date, 10), 0, 1).getTime(); } }; Morris.Hover = (function() { Hover.defaults = { "class": 'morris-hover morris-default-style' }; function Hover(options) { if (options == null) { options = {}; } this.options = $.extend({}, Morris.Hover.defaults, options); this.el = $("<div class='" + this.options["class"] + "'></div>"); this.el.hide(); this.options.parent.append(this.el); } Hover.prototype.update = function(html, x, y) { if (!html) { return this.hide(); } else { this.html(html); this.show(); return this.moveTo(x, y); } }; Hover.prototype.html = function(content) { return 
this.el.html(content); }; Hover.prototype.moveTo = function(x, y) { var hoverHeight, hoverWidth, left, parentHeight, parentWidth, top; parentWidth = this.options.parent.innerWidth(); parentHeight = this.options.parent.innerHeight(); hoverWidth = this.el.outerWidth(); hoverHeight = this.el.outerHeight(); left = Math.min(Math.max(0, x - hoverWidth / 2), parentWidth - hoverWidth); if (y != null) { top = y - hoverHeight - 10; if (top < 0) { top = y + 10; if (top + hoverHeight > parentHeight) { top = parentHeight / 2 - hoverHeight / 2; } } } else { top = parentHeight / 2 - hoverHeight / 2; } return this.el.css({ left: left + "px", top: parseInt(top) + "px" }); }; Hover.prototype.show = function() { return this.el.show(); }; Hover.prototype.hide = function() { return this.el.hide(); }; return Hover; })(); Morris.Line = (function(_super) { __extends(Line, _super); function Line(options) { this.hilight = __bind(this.hilight, this); this.onHoverOut = __bind(this.onHoverOut, this); this.onHoverMove = __bind(this.onHoverMove, this); this.onGridClick = __bind(this.onGridClick, this); if (!(this instanceof Morris.Line)) { return new Morris.Line(options); } Line.__super__.constructor.call(this, options); } Line.prototype.init = function() { if (this.options.hideHover !== 'always') { this.hover = new Morris.Hover({ parent: this.el }); this.on('hovermove', this.onHoverMove); this.on('hoverout', this.onHoverOut); return this.on('gridclick', this.onGridClick); } }; Line.prototype.defaults = { lineWidth: 3, pointSize: 4, lineColors: ['#0b62a4', '#7A92A3', '#4da74d', '#afd8f8', '#edc240', '#cb4b4b', '#9440ed'], pointStrokeWidths: [1], pointStrokeColors: ['#ffffff'], pointFillColors: [], smooth: true, xLabels: 'auto', xLabelFormat: null, xLabelMargin: 24, hideHover: false }; Line.prototype.calc = function() { this.calcPoints(); return this.generatePaths(); }; Line.prototype.calcPoints = function() { var row, y, _i, _len, _ref, _results; _ref = this.data; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { row = _ref[_i]; row._x = this.transX(row.x); row._y = (function() { var _j, _len1, _ref1, _results1; _ref1 = row.y; _results1 = []; for (_j = 0, _len1 = _ref1.length; _j < _len1; _j++) { y = _ref1[_j]; if (y != null) { _results1.push(this.transY(y)); } else { _results1.push(y); } } return _results1; }).call(this); _results.push(row._ymax = Math.min.apply(Math, [this.bottom].concat((function() { var _j, _len1, _ref1, _results1; _ref1 = row._y; _results1 = []; for (_j = 0, _len1 = _ref1.length; _j < _len1; _j++) { y = _ref1[_j]; if (y != null) { _results1.push(y); } } return _results1; })()))); } return _results; }; Line.prototype.hitTest = function(x) { var index, r, _i, _len, _ref; if (this.data.length === 0) { return null; } _ref = this.data.slice(1); for (index = _i = 0, _len = _ref.length; _i < _len; index = ++_i) { r = _ref[index]; if (x < (r._x + this.data[index]._x) / 2) { break; } } return index; }; Line.prototype.onGridClick = function(x, y) { var index; index = this.hitTest(x); return this.fire('click', index, this.data[index].src, x, y); }; Line.prototype.onHoverMove = function(x, y) { var index; index = this.hitTest(x); return this.displayHoverForRow(index); }; Line.prototype.onHoverOut = function() { if (this.options.hideHover !== false) { return this.displayHoverForRow(null); } }; Line.prototype.displayHoverForRow = function(index) { var _ref; if (index != null) { (_ref = this.hover).update.apply(_ref, this.hoverContentForRow(index)); return this.hilight(index); } else { 
this.hover.hide(); return this.hilight(); } }; Line.prototype.hoverContentForRow = function(index) { var content, j, row, y, _i, _len, _ref; row = this.data[index]; content = "<div class='morris-hover-row-label'>" + row.label + "</div>"; _ref = row.y; for (j = _i = 0, _len = _ref.length; _i < _len; j = ++_i) { y = _ref[j]; content += "<div class='morris-hover-point' style='color: " + (this.colorFor(row, j, 'label')) + "'>\n " + this.options.labels[j] + ":\n " + (this.yLabelFormat(y)) + "\n</div>"; } if (typeof this.options.hoverCallback === 'function') { content = this.options.hoverCallback(index, this.options, content, row.src); } return [content, row._x, row._ymax]; }; Line.prototype.generatePaths = function() { var coords, i, r, smooth; return this.paths = (function() { var _i, _ref, _ref1, _results; _results = []; for (i = _i = 0, _ref = this.options.ykeys.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? ++_i : --_i) { smooth = typeof this.options.smooth === "boolean" ? this.options.smooth : (_ref1 = this.options.ykeys[i], __indexOf.call(this.options.smooth, _ref1) >= 0); coords = (function() { var _j, _len, _ref2, _results1; _ref2 = this.data; _results1 = []; for (_j = 0, _len = _ref2.length; _j < _len; _j++) { r = _ref2[_j]; if (r._y[i] !== void 0) { _results1.push({ x: r._x, y: r._y[i] }); } } return _results1; }).call(this); if (coords.length > 1) { _results.push(Morris.Line.createPath(coords, smooth, this.bottom)); } else { _results.push(null); } } return _results; }).call(this); }; Line.prototype.draw = function() { var _ref; if ((_ref = this.options.axes) === true || _ref === 'both' || _ref === 'x') { this.drawXAxis(); } this.drawSeries(); if (this.options.hideHover === false) { return this.displayHoverForRow(this.data.length - 1); } }; Line.prototype.drawXAxis = function() { var drawLabel, l, labels, prevAngleMargin, prevLabelMargin, row, ypos, _i, _len, _results, _this = this; ypos = this.bottom + this.options.padding / 2; prevLabelMargin = null; prevAngleMargin = null; drawLabel = function(labelText, xpos) { var label, labelBox, margin, offset, textBox; label = _this.drawXAxisLabel(_this.transX(xpos), ypos, labelText); textBox = label.getBBox(); label.transform("r" + (-_this.options.xLabelAngle)); labelBox = label.getBBox(); label.transform("t0," + (labelBox.height / 2) + "..."); if (_this.options.xLabelAngle !== 0) { offset = -0.5 * textBox.width * Math.cos(_this.options.xLabelAngle * Math.PI / 180.0); label.transform("t" + offset + ",0..."); } labelBox = label.getBBox(); if (((prevLabelMargin == null) || prevLabelMargin >= labelBox.x + labelBox.width || (prevAngleMargin != null) && prevAngleMargin >= labelBox.x) && labelBox.x >= 0 && (labelBox.x + labelBox.width) < _this.el.width()) { if (_this.options.xLabelAngle !== 0) { margin = 1.25 * _this.options.gridTextSize / Math.sin(_this.options.xLabelAngle * Math.PI / 180.0); prevAngleMargin = labelBox.x - margin; } return prevLabelMargin = labelBox.x - _this.options.xLabelMargin; } else { return label.remove(); } }; if (this.options.parseTime) { if (this.data.length === 1 && this.options.xLabels === 'auto') { labels = [[this.data[0].label, this.data[0].x]]; } else { labels = Morris.labelSeries(this.xmin, this.xmax, this.width, this.options.xLabels, this.options.xLabelFormat); } } else { labels = (function() { var _i, _len, _ref, _results; _ref = this.data; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { row = _ref[_i]; _results.push([row.label, row.x]); } return _results; }).call(this); } 
labels.reverse(); _results = []; for (_i = 0, _len = labels.length; _i < _len; _i++) { l = labels[_i]; _results.push(drawLabel(l[0], l[1])); } return _results; }; Line.prototype.drawSeries = function() { var i, _i, _j, _ref, _ref1, _results; this.seriesPoints = []; for (i = _i = _ref = this.options.ykeys.length - 1; _ref <= 0 ? _i <= 0 : _i >= 0; i = _ref <= 0 ? ++_i : --_i) { this._drawLineFor(i); } _results = []; for (i = _j = _ref1 = this.options.ykeys.length - 1; _ref1 <= 0 ? _j <= 0 : _j >= 0; i = _ref1 <= 0 ? ++_j : --_j) { _results.push(this._drawPointFor(i)); } return _results; }; Line.prototype._drawPointFor = function(index) { var circle, row, _i, _len, _ref, _results; this.seriesPoints[index] = []; _ref = this.data; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { row = _ref[_i]; circle = null; if (row._y[index] != null) { circle = this.drawLinePoint(row._x, row._y[index], this.colorFor(row, index, 'point'), index); } _results.push(this.seriesPoints[index].push(circle)); } return _results; }; Line.prototype._drawLineFor = function(index) { var path; path = this.paths[index]; if (path !== null) { return this.drawLinePath(path, this.colorFor(null, index, 'line'), index); } }; Line.createPath = function(coords, smooth, bottom) { var coord, g, grads, i, ix, lg, path, prevCoord, x1, x2, y1, y2, _i, _len; path = ""; if (smooth) { grads = Morris.Line.gradients(coords); } prevCoord = { y: null }; for (i = _i = 0, _len = coords.length; _i < _len; i = ++_i) { coord = coords[i]; if (coord.y != null) { if (prevCoord.y != null) { if (smooth) { g = grads[i]; lg = grads[i - 1]; ix = (coord.x - prevCoord.x) / 4; x1 = prevCoord.x + ix; y1 = Math.min(bottom, prevCoord.y + ix * lg); x2 = coord.x - ix; y2 = Math.min(bottom, coord.y - ix * g); path += "C" + x1 + "," + y1 + "," + x2 + "," + y2 + "," + coord.x + "," + coord.y; } else { path += "L" + coord.x + "," + coord.y; } } else { if (!smooth || (grads[i] != null)) { path += "M" + coord.x + "," + coord.y; } } } prevCoord = coord; } return path; }; Line.gradients = function(coords) { var coord, grad, i, nextCoord, prevCoord, _i, _len, _results; grad = function(a, b) { return (a.y - b.y) / (a.x - b.x); }; _results = []; for (i = _i = 0, _len = coords.length; _i < _len; i = ++_i) { coord = coords[i]; if (coord.y != null) { nextCoord = coords[i + 1] || { y: null }; prevCoord = coords[i - 1] || { y: null }; if ((prevCoord.y != null) && (nextCoord.y != null)) { _results.push(grad(prevCoord, nextCoord)); } else if (prevCoord.y != null) { _results.push(grad(prevCoord, coord)); } else if (nextCoord.y != null) { _results.push(grad(coord, nextCoord)); } else { _results.push(null); } } else { _results.push(null); } } return _results; }; Line.prototype.hilight = function(index) { var i, _i, _j, _ref, _ref1; if (this.prevHilight !== null && this.prevHilight !== index) { for (i = _i = 0, _ref = this.seriesPoints.length - 1; 0 <= _ref ? _i <= _ref : _i >= _ref; i = 0 <= _ref ? ++_i : --_i) { if (this.seriesPoints[i][this.prevHilight]) { this.seriesPoints[i][this.prevHilight].animate(this.pointShrinkSeries(i)); } } } if (index !== null && this.prevHilight !== index) { for (i = _j = 0, _ref1 = this.seriesPoints.length - 1; 0 <= _ref1 ? _j <= _ref1 : _j >= _ref1; i = 0 <= _ref1 ? 
++_j : --_j) { if (this.seriesPoints[i][index]) { this.seriesPoints[i][index].animate(this.pointGrowSeries(i)); } } } return this.prevHilight = index; }; Line.prototype.colorFor = function(row, sidx, type) { if (typeof this.options.lineColors === 'function') { return this.options.lineColors.call(this, row, sidx, type); } else if (type === 'point') { return this.options.pointFillColors[sidx % this.options.pointFillColors.length] || this.options.lineColors[sidx % this.options.lineColors.length]; } else { return this.options.lineColors[sidx % this.options.lineColors.length]; } }; Line.prototype.drawXAxisLabel = function(xPos, yPos, text) { return this.raphael.text(xPos, yPos, text).attr('font-size', this.options.gridTextSize).attr('font-family', this.options.gridTextFamily).attr('font-weight', this.options.gridTextWeight).attr('fill', this.options.gridTextColor); }; Line.prototype.drawLinePath = function(path, lineColor, lineIndex) { return this.raphael.path(path).attr('stroke', lineColor).attr('stroke-width', this.lineWidthForSeries(lineIndex)); }; Line.prototype.drawLinePoint = function(xPos, yPos, pointColor, lineIndex) { return this.raphael.circle(xPos, yPos, this.pointSizeForSeries(lineIndex)).attr('fill', pointColor).attr('stroke-width', this.pointStrokeWidthForSeries(lineIndex)).attr('stroke', this.pointStrokeColorForSeries(lineIndex)); }; Line.prototype.pointStrokeWidthForSeries = function(index) { return this.options.pointStrokeWidths[index % this.options.pointStrokeWidths.length]; }; Line.prototype.pointStrokeColorForSeries = function(index) { return this.options.pointStrokeColors[index % this.options.pointStrokeColors.length]; }; Line.prototype.lineWidthForSeries = function(index) { if (this.options.lineWidth instanceof Array) { return this.options.lineWidth[index % this.options.lineWidth.length]; } else { return this.options.lineWidth; } }; Line.prototype.pointSizeForSeries = function(index) { if (this.options.pointSize instanceof Array) { return this.options.pointSize[index % this.options.pointSize.length]; } else { return this.options.pointSize; } }; Line.prototype.pointGrowSeries = function(index) { return Raphael.animation({ r: this.pointSizeForSeries(index) + 3 }, 25, 'linear'); }; Line.prototype.pointShrinkSeries = function(index) { return Raphael.animation({ r: this.pointSizeForSeries(index) }, 25, 'linear'); }; return Line; })(Morris.Grid); Morris.labelSeries = function(dmin, dmax, pxwidth, specName, xLabelFormat) { var d, d0, ddensity, name, ret, s, spec, t, _i, _len, _ref; ddensity = 200 * (dmax - dmin) / pxwidth; d0 = new Date(dmin); spec = Morris.LABEL_SPECS[specName]; if (spec === void 0) { _ref = Morris.AUTO_LABEL_ORDER; for (_i = 0, _len = _ref.length; _i < _len; _i++) { name = _ref[_i]; s = Morris.LABEL_SPECS[name]; if (ddensity >= s.span) { spec = s; break; } } } if (spec === void 0) { spec = Morris.LABEL_SPECS["second"]; } if (xLabelFormat) { spec = $.extend({}, spec, { fmt: xLabelFormat }); } d = spec.start(d0); ret = []; while ((t = d.getTime()) <= dmax) { if (t >= dmin) { ret.push([spec.fmt(d), t]); } spec.incr(d); } return ret; }; minutesSpecHelper = function(interval) { return { span: interval * 60 * 1000, start: function(d) { return new Date(d.getFullYear(), d.getMonth(), d.getDate(), d.getHours()); }, fmt: function(d) { return "" + (Morris.pad2(d.getHours())) + ":" + (Morris.pad2(d.getMinutes())); }, incr: function(d) { return d.setUTCMinutes(d.getUTCMinutes() + interval); } }; }; secondsSpecHelper = function(interval) { return { span: interval * 1000, 
start: function(d) { return new Date(d.getFullYear(), d.getMonth(), d.getDate(), d.getHours(), d.getMinutes()); }, fmt: function(d) { return "" + (Morris.pad2(d.getHours())) + ":" + (Morris.pad2(d.getMinutes())) + ":" + (Morris.pad2(d.getSeconds())); }, incr: function(d) { return d.setUTCSeconds(d.getUTCSeconds() + interval); } }; }; Morris.LABEL_SPECS = { "decade": { span: 172800000000, start: function(d) { return new Date(d.getFullYear() - d.getFullYear() % 10, 0, 1); }, fmt: function(d) { return "" + (d.getFullYear()); }, incr: function(d) { return d.setFullYear(d.getFullYear() + 10); } }, "year": { span: 17280000000, start: function(d) { return new Date(d.getFullYear(), 0, 1); }, fmt: function(d) { return "" + (d.getFullYear()); }, incr: function(d) { return d.setFullYear(d.getFullYear() + 1); } }, "month": { span: 2419200000, start: function(d) { return new Date(d.getFullYear(), d.getMonth(), 1); }, fmt: function(d) { return "" + (d.getFullYear()) + "-" + (Morris.pad2(d.getMonth() + 1)); }, incr: function(d) { return d.setMonth(d.getMonth() + 1); } }, "week": { span: 604800000, start: function(d) { return new Date(d.getFullYear(), d.getMonth(), d.getDate()); }, fmt: function(d) { return "" + (d.getFullYear()) + "-" + (Morris.pad2(d.getMonth() + 1)) + "-" + (Morris.pad2(d.getDate())); }, incr: function(d) { return d.setDate(d.getDate() + 7); } }, "day": { span: 86400000, start: function(d) { return new Date(d.getFullYear(), d.getMonth(), d.getDate()); }, fmt: function(d) { return "" + (d.getFullYear()) + "-" + (Morris.pad2(d.getMonth() + 1)) + "-" + (Morris.pad2(d.getDate())); }, incr: function(d) { return d.setDate(d.getDate() + 1); } }, "hour": minutesSpecHelper(60), "30min": minutesSpecHelper(30), "15min": minutesSpecHelper(15), "10min": minutesSpecHelper(10), "5min": minutesSpecHelper(5), "minute": minutesSpecHelper(1), "30sec": secondsSpecHelper(30), "15sec": secondsSpecHelper(15), "10sec": secondsSpecHelper(10), "5sec": secondsSpecHelper(5), "second": secondsSpecHelper(1) }; Morris.AUTO_LABEL_ORDER = ["decade", "year", "month", "week", "day", "hour", "30min", "15min", "10min", "5min", "minute", "30sec", "15sec", "10sec", "5sec", "second"]; Morris.Area = (function(_super) { var areaDefaults; __extends(Area, _super); areaDefaults = { fillOpacity: 'auto', behaveLikeLine: false }; function Area(options) { var areaOptions; if (!(this instanceof Morris.Area)) { return new Morris.Area(options); } areaOptions = $.extend({}, areaDefaults, options); this.cumulative = !areaOptions.behaveLikeLine; if (areaOptions.fillOpacity === 'auto') { areaOptions.fillOpacity = areaOptions.behaveLikeLine ? 
.8 : 1; } Area.__super__.constructor.call(this, areaOptions); } Area.prototype.calcPoints = function() { var row, total, y, _i, _len, _ref, _results; _ref = this.data; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { row = _ref[_i]; row._x = this.transX(row.x); total = 0; row._y = (function() { var _j, _len1, _ref1, _results1; _ref1 = row.y; _results1 = []; for (_j = 0, _len1 = _ref1.length; _j < _len1; _j++) { y = _ref1[_j]; if (this.options.behaveLikeLine) { _results1.push(this.transY(y)); } else { total += y || 0; _results1.push(this.transY(total)); } } return _results1; }).call(this); _results.push(row._ymax = Math.max.apply(Math, row._y)); } return _results; }; Area.prototype.drawSeries = function() { var i, range, _i, _j, _k, _len, _ref, _ref1, _results, _results1, _results2; this.seriesPoints = []; if (this.options.behaveLikeLine) { range = (function() { _results = []; for (var _i = 0, _ref = this.options.ykeys.length - 1; 0 <= _ref ? _i <= _ref : _i >= _ref; 0 <= _ref ? _i++ : _i--){ _results.push(_i); } return _results; }).apply(this); } else { range = (function() { _results1 = []; for (var _j = _ref1 = this.options.ykeys.length - 1; _ref1 <= 0 ? _j <= 0 : _j >= 0; _ref1 <= 0 ? _j++ : _j--){ _results1.push(_j); } return _results1; }).apply(this); } _results2 = []; for (_k = 0, _len = range.length; _k < _len; _k++) { i = range[_k]; this._drawFillFor(i); this._drawLineFor(i); _results2.push(this._drawPointFor(i)); } return _results2; }; Area.prototype._drawFillFor = function(index) { var path; path = this.paths[index]; if (path !== null) { path = path + ("L" + (this.transX(this.xmax)) + "," + this.bottom + "L" + (this.transX(this.xmin)) + "," + this.bottom + "Z"); return this.drawFilledPath(path, this.fillForSeries(index)); } }; Area.prototype.fillForSeries = function(i) { var color; color = Raphael.rgb2hsl(this.colorFor(this.data[i], i, 'line')); return Raphael.hsl(color.h, this.options.behaveLikeLine ? color.s * 0.9 : color.s * 0.75, Math.min(0.98, this.options.behaveLikeLine ? 
color.l * 1.2 : color.l * 1.25)); }; Area.prototype.drawFilledPath = function(path, fill) { return this.raphael.path(path).attr('fill', fill).attr('fill-opacity', this.options.fillOpacity).attr('stroke', 'none'); }; return Area; })(Morris.Line); Morris.Bar = (function(_super) { __extends(Bar, _super); function Bar(options) { this.onHoverOut = __bind(this.onHoverOut, this); this.onHoverMove = __bind(this.onHoverMove, this); this.onGridClick = __bind(this.onGridClick, this); if (!(this instanceof Morris.Bar)) { return new Morris.Bar(options); } Bar.__super__.constructor.call(this, $.extend({}, options, { parseTime: false })); } Bar.prototype.init = function() { this.cumulative = this.options.stacked; if (this.options.hideHover !== 'always') { this.hover = new Morris.Hover({ parent: this.el }); this.on('hovermove', this.onHoverMove); this.on('hoverout', this.onHoverOut); return this.on('gridclick', this.onGridClick); } }; Bar.prototype.defaults = { barSizeRatio: 0.75, barGap: 3, barColors: ['#0b62a4', '#7a92a3', '#4da74d', '#afd8f8', '#edc240', '#cb4b4b', '#9440ed'], barOpacity: 1.0, barRadius: [0, 0, 0, 0], xLabelMargin: 50 }; Bar.prototype.calc = function() { var _ref; this.calcBars(); if (this.options.hideHover === false) { return (_ref = this.hover).update.apply(_ref, this.hoverContentForRow(this.data.length - 1)); } }; Bar.prototype.calcBars = function() { var idx, row, y, _i, _len, _ref, _results; _ref = this.data; _results = []; for (idx = _i = 0, _len = _ref.length; _i < _len; idx = ++_i) { row = _ref[idx]; row._x = this.left + this.width * (idx + 0.5) / this.data.length; _results.push(row._y = (function() { var _j, _len1, _ref1, _results1; _ref1 = row.y; _results1 = []; for (_j = 0, _len1 = _ref1.length; _j < _len1; _j++) { y = _ref1[_j]; if (y != null) { _results1.push(this.transY(y)); } else { _results1.push(null); } } return _results1; }).call(this)); } return _results; }; Bar.prototype.draw = function() { var _ref; if ((_ref = this.options.axes) === true || _ref === 'both' || _ref === 'x') { this.drawXAxis(); } return this.drawSeries(); }; Bar.prototype.drawXAxis = function() { var i, label, labelBox, margin, offset, prevAngleMargin, prevLabelMargin, row, textBox, ypos, _i, _ref, _results; ypos = this.bottom + (this.options.xAxisLabelTopPadding || this.options.padding / 2); prevLabelMargin = null; prevAngleMargin = null; _results = []; for (i = _i = 0, _ref = this.data.length; 0 <= _ref ? _i < _ref : _i > _ref; i = 0 <= _ref ? 
++_i : --_i) { row = this.data[this.data.length - 1 - i]; label = this.drawXAxisLabel(row._x, ypos, row.label); textBox = label.getBBox(); label.transform("r" + (-this.options.xLabelAngle)); labelBox = label.getBBox(); label.transform("t0," + (labelBox.height / 2) + "..."); if (this.options.xLabelAngle !== 0) { offset = -0.5 * textBox.width * Math.cos(this.options.xLabelAngle * Math.PI / 180.0); label.transform("t" + offset + ",0..."); } if (((prevLabelMargin == null) || prevLabelMargin >= labelBox.x + labelBox.width || (prevAngleMargin != null) && prevAngleMargin >= labelBox.x) && labelBox.x >= 0 && (labelBox.x + labelBox.width) < this.el.width()) { if (this.options.xLabelAngle !== 0) { margin = 1.25 * this.options.gridTextSize / Math.sin(this.options.xLabelAngle * Math.PI / 180.0); prevAngleMargin = labelBox.x - margin; } _results.push(prevLabelMargin = labelBox.x - this.options.xLabelMargin); } else { _results.push(label.remove()); } } return _results; }; Bar.prototype.drawSeries = function() { var barWidth, bottom, groupWidth, idx, lastTop, left, leftPadding, numBars, row, sidx, size, spaceLeft, top, ypos, zeroPos; groupWidth = this.width / this.options.data.length; numBars = this.options.stacked ? 1 : this.options.ykeys.length; barWidth = (groupWidth * this.options.barSizeRatio - this.options.barGap * (numBars - 1)) / numBars; if (this.options.barSize) { barWidth = Math.min(barWidth, this.options.barSize); } spaceLeft = groupWidth - barWidth * numBars - this.options.barGap * (numBars - 1); leftPadding = spaceLeft / 2; zeroPos = this.ymin <= 0 && this.ymax >= 0 ? this.transY(0) : null; return this.bars = (function() { var _i, _len, _ref, _results; _ref = this.data; _results = []; for (idx = _i = 0, _len = _ref.length; _i < _len; idx = ++_i) { row = _ref[idx]; lastTop = 0; _results.push((function() { var _j, _len1, _ref1, _results1; _ref1 = row._y; _results1 = []; for (sidx = _j = 0, _len1 = _ref1.length; _j < _len1; sidx = ++_j) { ypos = _ref1[sidx]; if (ypos !== null) { if (zeroPos) { top = Math.min(ypos, zeroPos); bottom = Math.max(ypos, zeroPos); } else { top = ypos; bottom = this.bottom; } left = this.left + idx * groupWidth + leftPadding; if (!this.options.stacked) { left += sidx * (barWidth + this.options.barGap); } size = bottom - top; if (this.options.verticalGridCondition && this.options.verticalGridCondition(row.x)) { this.drawBar(this.left + idx * groupWidth, this.top, groupWidth, Math.abs(this.top - this.bottom), this.options.verticalGridColor, this.options.verticalGridOpacity, this.options.barRadius); } if (this.options.stacked) { top -= lastTop; } this.drawBar(left, top, barWidth, size, this.colorFor(row, sidx, 'bar'), this.options.barOpacity, this.options.barRadius); _results1.push(lastTop += size); } else { _results1.push(null); } } return _results1; }).call(this)); } return _results; }).call(this); }; Bar.prototype.colorFor = function(row, sidx, type) { var r, s; if (typeof this.options.barColors === 'function') { r = { x: row.x, y: row.y[sidx], label: row.label }; s = { index: sidx, key: this.options.ykeys[sidx], label: this.options.labels[sidx] }; return this.options.barColors.call(this, r, s, type); } else { return this.options.barColors[sidx % this.options.barColors.length]; } }; Bar.prototype.hitTest = function(x) { if (this.data.length === 0) { return null; } x = Math.max(Math.min(x, this.right), this.left); return Math.min(this.data.length - 1, Math.floor((x - this.left) / (this.width / this.data.length))); }; Bar.prototype.onGridClick = function(x, y) { var 
index; index = this.hitTest(x); return this.fire('click', index, this.data[index].src, x, y); }; Bar.prototype.onHoverMove = function(x, y) { var index, _ref; index = this.hitTest(x); return (_ref = this.hover).update.apply(_ref, this.hoverContentForRow(index)); }; Bar.prototype.onHoverOut = function() { if (this.options.hideHover !== false) { return this.hover.hide(); } }; Bar.prototype.hoverContentForRow = function(index) { var content, j, row, x, y, _i, _len, _ref; row = this.data[index]; content = "<div class='morris-hover-row-label'>" + row.label + "</div>"; _ref = row.y; for (j = _i = 0, _len = _ref.length; _i < _len; j = ++_i) { y = _ref[j]; content += "<div class='morris-hover-point' style='color: " + (this.colorFor(row, j, 'label')) + "'>\n " + this.options.labels[j] + ":\n " + (this.yLabelFormat(y)) + "\n</div>"; } if (typeof this.options.hoverCallback === 'function') { content = this.options.hoverCallback(index, this.options, content, row.src); } x = this.left + (index + 0.5) * this.width / this.data.length; return [content, x]; }; Bar.prototype.drawXAxisLabel = function(xPos, yPos, text) { var label; return label = this.raphael.text(xPos, yPos, text).attr('font-size', this.options.gridTextSize).attr('font-family', this.options.gridTextFamily).attr('font-weight', this.options.gridTextWeight).attr('fill', this.options.gridTextColor); }; Bar.prototype.drawBar = function(xPos, yPos, width, height, barColor, opacity, radiusArray) { var maxRadius, path; maxRadius = Math.max.apply(Math, radiusArray); if (maxRadius === 0 || maxRadius > height) { path = this.raphael.rect(xPos, yPos, width, height); } else { path = this.raphael.path(this.roundedRect(xPos, yPos, width, height, radiusArray)); } return path.attr('fill', barColor).attr('fill-opacity', opacity).attr('stroke', 'none'); }; Bar.prototype.roundedRect = function(x, y, w, h, r) { if (r == null) { r = [0, 0, 0, 0]; } return ["M", x, r[0] + y, "Q", x, y, x + r[0], y, "L", x + w - r[1], y, "Q", x + w, y, x + w, y + r[1], "L", x + w, y + h - r[2], "Q", x + w, y + h, x + w - r[2], y + h, "L", x + r[3], y + h, "Q", x, y + h, x, y + h - r[3], "Z"]; }; return Bar; })(Morris.Grid); Morris.Donut = (function(_super) { __extends(Donut, _super); Donut.prototype.defaults = { colors: ['#0B62A4', '#3980B5', '#679DC6', '#95BBD7', '#B0CCE1', '#095791', '#095085', '#083E67', '#052C48', '#042135'], backgroundColor: '#FFFFFF', labelColor: '#000000', formatter: Morris.commas, resize: false }; function Donut(options) { this.resizeHandler = __bind(this.resizeHandler, this); this.select = __bind(this.select, this); this.click = __bind(this.click, this); var _this = this; if (!(this instanceof Morris.Donut)) { return new Morris.Donut(options); } this.options = $.extend({}, this.defaults, options); if (typeof options.element === 'string') { this.el = $(document.getElementById(options.element)); } else { this.el = $(options.element); } if (this.el === null || this.el.length === 0) { throw new Error("Graph placeholder not found."); } if (options.data === void 0 || options.data.length === 0) { return; } this.raphael = new Raphael(this.el[0]); if (this.options.resize) { $(window).bind('resize', function(evt) { if (_this.timeoutId != null) { window.clearTimeout(_this.timeoutId); } return _this.timeoutId = window.setTimeout(_this.resizeHandler, 100); }); } this.setData(options.data); } Donut.prototype.redraw = function() { var C, cx, cy, i, idx, last, max_value, min, next, seg, total, value, w, _i, _j, _k, _len, _len1, _len2, _ref, _ref1, _ref2, _results; 
this.raphael.clear(); cx = this.el.width() / 2; cy = this.el.height() / 2; w = (Math.min(cx, cy) - 10) / 3; total = 0; _ref = this.values; for (_i = 0, _len = _ref.length; _i < _len; _i++) { value = _ref[_i]; total += value; } min = 5 / (2 * w); C = 1.9999 * Math.PI - min * this.data.length; last = 0; idx = 0; this.segments = []; _ref1 = this.values; for (i = _j = 0, _len1 = _ref1.length; _j < _len1; i = ++_j) { value = _ref1[i]; next = last + min + C * (value / total); seg = new Morris.DonutSegment(cx, cy, w * 2, w, last, next, this.data[i].color || this.options.colors[idx % this.options.colors.length], this.options.backgroundColor, idx, this.raphael); seg.render(); this.segments.push(seg); seg.on('hover', this.select); seg.on('click', this.click); last = next; idx += 1; } this.text1 = this.drawEmptyDonutLabel(cx, cy - 10, this.options.labelColor, 15, 800); this.text2 = this.drawEmptyDonutLabel(cx, cy + 10, this.options.labelColor, 14); max_value = Math.max.apply(Math, this.values); idx = 0; _ref2 = this.values; _results = []; for (_k = 0, _len2 = _ref2.length; _k < _len2; _k++) { value = _ref2[_k]; if (value === max_value) { this.select(idx); break; } _results.push(idx += 1); } return _results; }; Donut.prototype.setData = function(data) { var row; this.data = data; this.values = (function() { var _i, _len, _ref, _results; _ref = this.data; _results = []; for (_i = 0, _len = _ref.length; _i < _len; _i++) { row = _ref[_i]; _results.push(parseFloat(row.value)); } return _results; }).call(this); return this.redraw(); }; Donut.prototype.click = function(idx) { return this.fire('click', idx, this.data[idx]); }; Donut.prototype.select = function(idx) { var row, s, segment, _i, _len, _ref; _ref = this.segments; for (_i = 0, _len = _ref.length; _i < _len; _i++) { s = _ref[_i]; s.deselect(); } segment = this.segments[idx]; segment.select(); row = this.data[idx]; return this.setLabels(row.label, this.options.formatter(row.value, row)); }; Donut.prototype.setLabels = function(label1, label2) { var inner, maxHeightBottom, maxHeightTop, maxWidth, text1bbox, text1scale, text2bbox, text2scale; inner = (Math.min(this.el.width() / 2, this.el.height() / 2) - 10) * 2 / 3; maxWidth = 1.8 * inner; maxHeightTop = inner / 2; maxHeightBottom = inner / 3; this.text1.attr({ text: label1, transform: '' }); text1bbox = this.text1.getBBox(); text1scale = Math.min(maxWidth / text1bbox.width, maxHeightTop / text1bbox.height); this.text1.attr({ transform: "S" + text1scale + "," + text1scale + "," + (text1bbox.x + text1bbox.width / 2) + "," + (text1bbox.y + text1bbox.height) }); this.text2.attr({ text: label2, transform: '' }); text2bbox = this.text2.getBBox(); text2scale = Math.min(maxWidth / text2bbox.width, maxHeightBottom / text2bbox.height); return this.text2.attr({ transform: "S" + text2scale + "," + text2scale + "," + (text2bbox.x + text2bbox.width / 2) + "," + text2bbox.y }); }; Donut.prototype.drawEmptyDonutLabel = function(xPos, yPos, color, fontSize, fontWeight) { var text; text = this.raphael.text(xPos, yPos, '').attr('font-size', fontSize).attr('fill', color); if (fontWeight != null) { text.attr('font-weight', fontWeight); } return text; }; Donut.prototype.resizeHandler = function() { this.timeoutId = null; this.raphael.setSize(this.el.width(), this.el.height()); return this.redraw(); }; return Donut; })(Morris.EventEmitter); Morris.DonutSegment = (function(_super) { __extends(DonutSegment, _super); function DonutSegment(cx, cy, inner, outer, p0, p1, color, backgroundColor, index, raphael) { this.cx = cx; 
this.cy = cy; this.inner = inner; this.outer = outer; this.color = color; this.backgroundColor = backgroundColor; this.index = index; this.raphael = raphael; this.deselect = __bind(this.deselect, this); this.select = __bind(this.select, this); this.sin_p0 = Math.sin(p0); this.cos_p0 = Math.cos(p0); this.sin_p1 = Math.sin(p1); this.cos_p1 = Math.cos(p1); this.is_long = (p1 - p0) > Math.PI ? 1 : 0; this.path = this.calcSegment(this.inner + 3, this.inner + this.outer - 5); this.selectedPath = this.calcSegment(this.inner + 3, this.inner + this.outer); this.hilight = this.calcArc(this.inner); } DonutSegment.prototype.calcArcPoints = function(r) { return [this.cx + r * this.sin_p0, this.cy + r * this.cos_p0, this.cx + r * this.sin_p1, this.cy + r * this.cos_p1]; }; DonutSegment.prototype.calcSegment = function(r1, r2) { var ix0, ix1, iy0, iy1, ox0, ox1, oy0, oy1, _ref, _ref1; _ref = this.calcArcPoints(r1), ix0 = _ref[0], iy0 = _ref[1], ix1 = _ref[2], iy1 = _ref[3]; _ref1 = this.calcArcPoints(r2), ox0 = _ref1[0], oy0 = _ref1[1], ox1 = _ref1[2], oy1 = _ref1[3]; return ("M" + ix0 + "," + iy0) + ("A" + r1 + "," + r1 + ",0," + this.is_long + ",0," + ix1 + "," + iy1) + ("L" + ox1 + "," + oy1) + ("A" + r2 + "," + r2 + ",0," + this.is_long + ",1," + ox0 + "," + oy0) + "Z"; }; DonutSegment.prototype.calcArc = function(r) { var ix0, ix1, iy0, iy1, _ref; _ref = this.calcArcPoints(r), ix0 = _ref[0], iy0 = _ref[1], ix1 = _ref[2], iy1 = _ref[3]; return ("M" + ix0 + "," + iy0) + ("A" + r + "," + r + ",0," + this.is_long + ",0," + ix1 + "," + iy1); }; DonutSegment.prototype.render = function() { var _this = this; this.arc = this.drawDonutArc(this.hilight, this.color); return this.seg = this.drawDonutSegment(this.path, this.color, this.backgroundColor, function() { return _this.fire('hover', _this.index); }, function() { return _this.fire('click', _this.index); }); }; DonutSegment.prototype.drawDonutArc = function(path, color) { return this.raphael.path(path).attr({ stroke: color, 'stroke-width': 2, opacity: 0 }); }; DonutSegment.prototype.drawDonutSegment = function(path, fillColor, strokeColor, hoverFunction, clickFunction) { return this.raphael.path(path).attr({ fill: fillColor, stroke: strokeColor, 'stroke-width': 3 }).hover(hoverFunction).click(clickFunction); }; DonutSegment.prototype.select = function() { if (!this.selected) { this.seg.animate({ path: this.selectedPath }, 150, '<>'); this.arc.animate({ opacity: 1 }, 150, '<>'); return this.selected = true; } }; DonutSegment.prototype.deselect = function() { if (this.selected) { this.seg.animate({ path: this.path }, 150, '<>'); this.arc.animate({ opacity: 0 }, 150, '<>'); return this.selected = false; } }; return DonutSegment; })(Morris.EventEmitter); }).call(this);
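The file above is the stock Morris.js bundle that Flask-AdminLTE2 ships as a static asset; as its setData shows, Morris charts consume a list of row objects. A hedged sketch of supplying such rows from the Python side follows; the route name, field names and values are all invented, and the JS side would point Morris.Line at /chart-data with xkey 'when' and ykeys ['value']:

# Hypothetical Flask view producing rows for a Morris.Line chart; the
# route, keys and values are illustrative assumptions only.
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/chart-data')
def chart_data():
    rows = [
        {'when': '2014-01-01', 'value': 3},  # a date form Morris.parseDate accepts
        {'when': '2014-01-02', 'value': 7},
    ]
    return jsonify(data=rows)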
/NeuroUnits-0.1.2.tar.gz/NeuroUnits-0.1.2/src/neurounits/importers/neuroml/neuroml_xml_data.py
# -------------------------------------------------------------------------------
#  Copyright (c) 2012 Michael Hull.  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#
#   - Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#   - Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
#  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#  POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------

import re
import xml.etree.cElementTree as etree

from .errors import NeuroUnitsImportNeuroMLNotImplementedException


def ignore(*args, **kwargs):
    pass


def not_supported(*args, **kwargs):
    raise NeuroUnitsImportNeuroMLNotImplementedException()


ns_regex = re.compile(r'{(?P<NS>.*)}(?P<TAG>.*)')


def strip_namespace(tag):
    m = ns_regex.match(tag)
    return m.groupdict()['TAG']


def recursive_strip_namespaces(xmlNode):
    xmlNode.tag = strip_namespace(xmlNode.tag)
    for child in xmlNode.getchildren():
        recursive_strip_namespaces(child)


def dispatch_subnodes(node, dispatch_map):
    # Route each child element to the handler registered for its tag.
    for n in node.getchildren():
        if n.tag in dispatch_map:
            dispatch_map[n.tag](n)
        else:
            print ' -- ', n
            assert False


class ChannelML_Q10Setting(object):

    def __init__(self, gate, q10factor, experimental_temp, mode):
        self.gate = gate
        self.q10factor = q10factor
        self.experimental_temp = experimental_temp
        self.mode = mode


class ChannelML_GateEqn(object):

    def __init__(self, sn):
        self.name = sn.attrib['name']
        self.frm = sn.attrib['from']
        self.to = sn.attrib['to']
        self.expr_form = sn.attrib['expr_form']
        self.rate = sn.attrib.get('rate', None)
        self.scale = sn.attrib.get('scale', None)
        self.midpoint = sn.attrib.get('midpoint', None)
        self.expr = sn.attrib.get('expr', None)

    def getSubString(self, what):
        LUT = {'rate': lambda: '{%f}' % float(self.rate),
               'scale': lambda: '{%f}' % float(self.scale),
               'midpoint': lambda: '{%f}' % float(self.midpoint)}
        return LUT[what]()

    def getEqn(self):
        if self.expr_form == 'generic':
            # A hack, so we don't need to deal with the ternary operator:
            if '?' in self.expr:
                assert self.expr.count('?') == 1
                (A, BC) = self.expr.split('?')
                (B, C) = BC.split(':')
                self.expr = '[%s]if[%s]else[%s]' % (B.strip(), A.strip().replace(' ', ''), C.strip())
            return self.expr

        scale = '{%f}' % float(self.scale)  # self.getSubString('scale')
        rate = self.getSubString('rate')
        midpoint = self.getSubString('midpoint')

        if self.expr_form == 'sigmoid':
            return ' (1 * %s) / ( 1.0 + exp ( (V - %s)/%s ) ) ' % (rate, midpoint, scale)
        elif self.expr_form == 'exponential':
            return ' %s * exp ( 1.0 * (V- %s)/%s ) ' % (rate, midpoint, scale)
        elif self.expr_form == 'exp_linear':
            return '%s * ( (V - %s) / %s) / (1 - exp( -1.0 * ((V - %s)/%s) )) ' % (rate, midpoint, scale, midpoint, scale)
        else:
            assert False


class ChannelML_Transition(ChannelML_GateEqn):
    pass


class ChannelML_TimeCourse(ChannelML_GateEqn):
    pass


class ChannelML_SteadyState(ChannelML_GateEqn):
    pass


class ChannelML_Gate(object):

    def load_closed_state(self, sn):
        self.closedstates.append(sn.attrib['id'])

    def load_open_state(self, sn):
        self.openstates.append(sn.attrib['id'])

    def load_time_course(self, sn):
        self.time_courses.append(ChannelML_TimeCourse(sn))

    def load_steady_state(self, sn):
        self.steady_states.append(ChannelML_SteadyState(sn))

    def load_transition(self, sn):
        self.transitions.append(ChannelML_Transition(sn))

    def load_initialisation(self, sn):
        assert not self.initialisation
        self.initialisation = sn.attrib['value']

    def __init__(self, node):
        self.openstates = []
        self.closedstates = []
        self.time_courses = []
        self.steady_states = []
        self.transitions = []
        self.initialisation = None

        self.name = node.attrib['name']
        self.instances = int(node.attrib['instances'])

        tag_handlers = {
            'closed_state': self.load_closed_state,
            'open_state': self.load_open_state,
            'time_course': self.load_time_course,
            'steady_state': self.load_steady_state,
            'transition': self.load_transition,
            'initialisation': self.load_initialisation,
        }

        # Load the Subnodes:
        dispatch_subnodes(node, tag_handlers)

        assert self.openstates
        assert self.closedstates


class ChannelMLInfo(object):

    def load_Q10Settings(self, node):
        if 'fixed_q10' in node.attrib:
            setting = ChannelML_Q10Setting(
                gate=node.get('gate', None),
                q10factor=node.attrib['fixed_q10'],
                experimental_temp=node.attrib['experimental_temp'],
                mode='fixed_q10')
            self.q10settings.append(setting)
        else:
            assert 'q10_factor' in node.attrib
            setting = ChannelML_Q10Setting(
                gate=node.get('gate', None),
                q10factor=node.attrib['q10_factor'],
                experimental_temp=node.attrib['experimental_temp'],
                mode='q10_factor')
            self.q10settings.append(setting)

    def load_parameters(self, node):
        for parameter in node.iter('parameter'):
            self.parameters[parameter.get('name')] = parameter.get('value')

    def load_conc_factor(self, node):
        self.unsupported_tags = "Unsupported NeuroML tag: 'conc_factor'"

    def load_conc_dependence(self, node):
        self.unsupported_tags = "Unsupported NeuroML tag: 'conc_dependence' "
        self.iv_conc_dep_name = node.attrib['name']
        self.iv_conc_dep_ion = node.attrib['ion']
        self.iv_conc_dep_charge = int(node.attrib['charge'])
        self.iv_conc_dep_variable_name = node.attrib['variable_name']
        self.iv_conc_dep_min_conc = node.attrib['min_conc']
        self.iv_conc_dep_max_conc = node.attrib['max_conc']

    def load_gate(self, node):
        self.gates.append(ChannelML_Gate(node))

    def load_offset(self, n):
        self.offset = float(n.attrib['value'])

    def load_current_voltage_relation(self, node):
        self.iv_cond_law = node.attrib.get('cond_law', None)
        self.iv_ion = node.attrib.get('ion', None)
        self.iv_default_gmax = node.attrib.get('default_gmax', None)
        self.iv_default_erev = node.attrib.get('default_erev', None)
        self.iv_charge = node.attrib.get('charge', None)
        self.iv_fixed_erev = node.attrib.get('fixed_erev', None)

        tag_handlers = {
            'q10_settings': self.load_Q10Settings,
            'offset': self.load_offset,
            'conc_dependence': self.load_conc_dependence,
            'ohmic': ignore,
            'conc_factor': self.load_conc_factor,
            'gate': self.load_gate,
        }
        dispatch_subnodes(node, tag_handlers)

    def __init__(self, chl_type_node, units):
        print 'Loading Channel Type:', chl_type_node.get('name')
        self.name = chl_type_node.get('name')
        self.parameters = {}
        self.q10settings = []
        self.offset = 0.
        self.gates = []
        self.units = units

        # self.iv_conc_dep_name = None
        # self.iv_conc_dep_ion = None
        self.unsupported_tags = None

        # Sanity Checks:
        assert chl_type_node.get('density', 'yes') == 'yes'

        tag_handlers = {
            'status': ignore,
            'notes': ignore,
            'authorList': ignore,
            'publication': ignore,
            'neuronDBref': ignore,
            'modelDBref': ignore,
            'impl_prefs': ignore,
            'current_voltage_relation': self.load_current_voltage_relation,
            'parameters': self.load_parameters,
        }
        dispatch_subnodes(chl_type_node, tag_handlers)


def _parse_channelml_file(xmlfile):
    tree = etree.parse(xmlfile)
    root = tree.getroot()
    recursive_strip_namespaces(root)

    if root.tag != 'channelml':
        return {}

    # print xmlfile
    chls = {}
    for ch in root.iter('channel_type'):
        chl = ChannelMLInfo(ch, units=root.attrib['units'])
        chls[chl.name + ' (%s)' % xmlfile] = chl
    return chls
PypiClean
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/tqdm/tqdm/gui.py
from __future__ import absolute_import, division import re from warnings import warn # to inherit from the tqdm class from .std import TqdmExperimentalWarning from .std import tqdm as std_tqdm # import compatibility functions and utilities from .utils import _range __author__ = {"github.com/": ["casperdcl", "lrq3000"]} __all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange'] class tqdm_gui(std_tqdm): # pragma: no cover """Experimental Matplotlib GUI version of tqdm!""" # TODO: @classmethod: write() on GUI? def __init__(self, *args, **kwargs): from collections import deque import matplotlib as mpl import matplotlib.pyplot as plt kwargs = kwargs.copy() kwargs['gui'] = True colour = kwargs.pop('colour', 'g') super(tqdm_gui, self).__init__(*args, **kwargs) if self.disable: return warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2) self.mpl = mpl self.plt = plt # Remember if external environment uses toolbars self.toolbar = self.mpl.rcParams['toolbar'] self.mpl.rcParams['toolbar'] = 'None' self.mininterval = max(self.mininterval, 0.5) self.fig, ax = plt.subplots(figsize=(9, 2.2)) # self.fig.subplots_adjust(bottom=0.2) total = self.__len__() # avoids TypeError on None #971 if total is not None: self.xdata = [] self.ydata = [] self.zdata = [] else: self.xdata = deque([]) self.ydata = deque([]) self.zdata = deque([]) self.line1, = ax.plot(self.xdata, self.ydata, color='b') self.line2, = ax.plot(self.xdata, self.zdata, color='k') ax.set_ylim(0, 0.001) if total is not None: ax.set_xlim(0, 100) ax.set_xlabel("percent") self.fig.legend((self.line1, self.line2), ("cur", "est"), loc='center right') # progressbar self.hspan = plt.axhspan(0, 0.001, xmin=0, xmax=0, color=colour) else: # ax.set_xlim(-60, 0) ax.set_xlim(0, 60) ax.invert_xaxis() ax.set_xlabel("seconds") ax.legend(("cur", "est"), loc='lower left') ax.grid() # ax.set_xlabel('seconds') ax.set_ylabel((self.unit if self.unit else "it") + "/s") if self.unit_scale: plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) ax.yaxis.get_offset_text().set_x(-0.15) # Remember if external environment is interactive self.wasion = plt.isinteractive() plt.ion() self.ax = ax def close(self): if self.disable: return self.disable = True with self.get_lock(): self._instances.remove(self) # Restore toolbars self.mpl.rcParams['toolbar'] = self.toolbar # Return to non-interactive mode if not self.wasion: self.plt.ioff() if self.leave: self.display() else: self.plt.close(self.fig) def clear(self, *_, **__): pass def display(self, *_, **__): n = self.n cur_t = self._time() elapsed = cur_t - self.start_t delta_it = n - self.last_print_n delta_t = cur_t - self.last_print_t # Inline due to multiple calls total = self.total xdata = self.xdata ydata = self.ydata zdata = self.zdata ax = self.ax line1 = self.line1 line2 = self.line2 # instantaneous rate y = delta_it / delta_t # overall rate z = n / elapsed # update line data xdata.append(n * 100.0 / total if total else cur_t) ydata.append(y) zdata.append(z) # Discard old values # xmin, xmax = ax.get_xlim() # if (not total) and elapsed > xmin * 1.1: if (not total) and elapsed > 66: xdata.popleft() ydata.popleft() zdata.popleft() ymin, ymax = ax.get_ylim() if y > ymax or z > ymax: ymax = 1.1 * y ax.set_ylim(ymin, ymax) ax.figure.canvas.draw() if total: line1.set_data(xdata, ydata) line2.set_data(xdata, zdata) try: poly_lims = self.hspan.get_xy() except AttributeError: self.hspan = self.plt.axhspan(0, 0.001, xmin=0, xmax=0, color='g') poly_lims = self.hspan.get_xy() poly_lims[0, 1] = ymin poly_lims[1, 1] = 
ymax poly_lims[2] = [n / total, ymax] poly_lims[3] = [poly_lims[2, 0], ymin] if len(poly_lims) > 4: poly_lims[4, 1] = ymin self.hspan.set_xy(poly_lims) else: t_ago = [cur_t - i for i in xdata] line1.set_data(t_ago, ydata) line2.set_data(t_ago, zdata) d = self.format_dict # remove {bar} d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace( "{bar}", "<bar/>") msg = self.format_meter(**d) if '<bar/>' in msg: msg = "".join(re.split(r'\|?<bar/>\|?', msg, 1)) ax.set_title(msg, fontname="DejaVu Sans Mono", fontsize=11) self.plt.pause(1e-9) def tgrange(*args, **kwargs): """ A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`. On Python3+, `range` is used instead of `xrange`. """ return tqdm_gui(_range(*args), **kwargs) # Aliases tqdm = tqdm_gui trange = tgrange
PypiClean
/AtmosphericChemistry-0.1.0.tar.gz/AtmosphericChemistry-0.1.0/README.txt
=============================
Atmospheric Chemistry Package
=============================

This collection of python classes and routines facilitates the exploration, construction, and conversion of (gas-phase) chemical mechanisms (the system of differential equations that are solved in numerical models of atmospheric chemistry and transport). It consists of the following key elements:

1. The **Compound** class describing various physical and chemical properties of individual molecules or lumped substances and defining their names in various common chemistry schemes.

2. The **speciesTable** class which collects the compound information from all molecules and handles reading and writing this information from or to csv files.

3. The **Reaction** class describing gas-phase chemical reactants, products, product yields, the rate coefficient term(s) and optional tags, comments, and reaction labels.

4. The **Mechanism** class which contains a list of reactions and additional variables, comments, and other information and handles in- and output from and to various file formats (mech, csv, kpp, mozpp, racm).

Additional utilities allow computation of molecular weights based on the elemental composition of a molecule and queries of the Pubchem and Master Chemical Mechanism databases to obtain additional information about a compound (e.g. SMILES code, IUPAC name, etc.); a stand-in sketch of the molecular-weight computation is given at the end of this file. There is even a rudimentary algebra class (mathTree) included, which allows scanning of mathematical expressions and factorisation.

The main purpose of this package is the interconversion of chemical mechanisms from one format to another, including the translation of species names from the namespace of one model to that of another model. It was originally developed because of the author's need to perform an intercomparison of various mechanisms with the help of a specific chemical box model (CAABA/MECCA, see http://www.mecca.messy-interface.org/), but it may also be useful for other purposes, and the author will be grateful for feedback on various use cases as well as suggestions for improvement.

Conversion of a chemical mechanism is possible in only 4 lines of code::

    from ac.gasphase.mechanism import Mechanism
    m = Mechanism.from_mech(inputfilename)
    m.translate_to_model('tm5')
    m.write_kpp_mecca(outputfilename)

This will read a mechanism in the 'mech' format, translate all species names to the TM5 namespace and write out species and equations files for the Kinetic Preprocessor (KPP, see http://people.cs.vt.edu/~asandu/Software/Kpp/).

It is also easily possible to test all reactions for mass conservation::

    m.check_mass_balance()

or to find out which reactions involve a specific reactant, say HO2::

    rlist = m.find_reactions(['HO2'])
    for r in rlist:
        print r.to_mech()

A number of applications of the ac package are provided in the *bin* directory of this distribution.

Requirements
============

Most of the functionality of the AtmosphericChemistry package requires only python standard libraries and numpy. However, if you want to use the pubchem or mcm_query routines to access compound information from internet databases, you will also need urllib, urllib2, lxml.html, json, time.

Author
======

Martin G. Schultz, IEK-8, Forschungszentrum Juelich, Germany

Acknowledgements
================

Matt Swain for providing the PubChemPy module which is included in this package for cross-referencing compound information in the master species table.

Snehal Waychal for providing the mcm_query module and for packaging this up.
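Example: molecular weights
==========================

The molecular-weight utility mentioned above is not demonstrated in this README. As a stand-in, the following plain-python sketch shows the underlying computation from an elemental composition (rounded atomic masses; this is *not* the package's actual routine)::

    # Rounded standard atomic masses for a few common elements
    masses = {'C': 12.011, 'H': 1.008, 'O': 15.999, 'N': 14.007}

    def molweight(composition):
        # composition is an element -> atom-count mapping
        return sum(masses[el] * n for el, n in composition.items())

    print(molweight({'C': 5, 'H': 8}))   # isoprene, roughly 68.1 g/mol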
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/form/uploader/plugins/Flash.js
define("dojox/form/uploader/plugins/Flash",["dojo/dom-form","dojo/dom-style","dojo/dom-construct","dojo/dom-attr","dojo/_base/declare","dojo/_base/config","dojo/_base/connect","dojo/_base/lang","dojo/_base/array","dojox/form/uploader/plugins/HTML5","dojox/embed/Flash"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b){ var _c=_5("dojox.form.uploader.plugins.Flash",[],{swfPath:_6.uploaderPath||require.toUrl("dojox/form/resources/uploader.swf"),skipServerCheck:true,serverTimeout:2000,isDebug:false,devMode:false,deferredUploading:0,force:"",postMixInProperties:function(){ if(!this.supports("multiple")){ this.uploadType="flash"; this._files=[]; this._fileMap={}; this._createInput=this._createFlashUploader; this.getFileList=this.getFlashFileList; this.reset=this.flashReset; this.upload=this.uploadFlash; this.fieldname="flashUploadFiles"; } this.inherited(arguments); },onReady:function(_d){ },onLoad:function(_e){ },onFileChange:function(_f){ },onFileProgress:function(_10){ },getFlashFileList:function(){ return this._files; },flashReset:function(){ this.flashMovie.reset(); this._files=[]; },uploadFlash:function(_11){ this.onBegin(this.getFileList()); this.flashMovie.doUpload(_11); },_change:function(_12){ this._files=this._files.concat(_12); _9.forEach(_12,function(f){ f.bytesLoaded=0; f.bytesTotal=f.size; this._fileMap[f.name+"_"+f.size]=f; },this); this.onChange(this._files); this.onFileChange(_12); },_complete:function(_13){ var o=this._getCustomEvent(); o.type="load"; this.onComplete(_13); },_progress:function(f){ this._fileMap[f.name+"_"+f.bytesTotal].bytesLoaded=f.bytesLoaded; var o=this._getCustomEvent(); this.onFileProgress(f); this.onProgress(o); },_error:function(err){ this.onError(err); },_onFlashBlur:function(_14){ },_getCustomEvent:function(){ var o={bytesLoaded:0,bytesTotal:0,type:"progress",timeStamp:new Date().getTime()}; for(var nm in this._fileMap){ o.bytesTotal+=this._fileMap[nm].bytesTotal; o.bytesLoaded+=this._fileMap[nm].bytesLoaded; } o.decimal=o.bytesLoaded/o.bytesTotal; o.percent=Math.ceil((o.bytesLoaded/o.bytesTotal)*100)+"%"; return o; },_connectFlash:function(){ this._subs=[]; this._cons=[]; var _15=_8.hitch(this,function(s,_16){ this._subs.push(_7.subscribe(this.id+s,this,_16)); }); _15("/filesSelected","_change"); _15("/filesUploaded","_complete"); _15("/filesProgress","_progress"); _15("/filesError","_error"); _15("/filesCanceled","onCancel"); _15("/stageBlur","_onFlashBlur"); this.connect(this.domNode,"focus",function(){ this.flashMovie.focus(); this.flashMovie.doFocus(); }); if(this.tabIndex>=0){ _4.set(this.domNode,"tabIndex",this.tabIndex); } },_createFlashUploader:function(){ var url=this.getUrl(); if(url){ if(url.toLowerCase().indexOf("http")<0&&url.indexOf("/")!=0){ var loc=window.location.href.split("/"); loc.pop(); loc=loc.join("/")+"/"; url=loc+url; } }else{ console.warn("Warning: no uploadUrl provided."); } this.inputNode=_3.create("div",{className:"dojoxFlashNode"},this.domNode,"first"); _2.set(this.inputNode,{position:"absolute",top:"-2px",width:this.btnSize.w+"px",height:this.btnSize.h+"px",opacity:0}); var w=this.btnSize.w; var h=this.btnSize.h; var _17={expressInstall:true,path:(this.swfPath.uri||this.swfPath)+"?cb_"+(new 
Date().getTime()),width:w,height:h,allowScriptAccess:"always",allowNetworking:"all",vars:{uploadDataFieldName:this.flashFieldName||this.name+"Flash",uploadUrl:url,uploadOnSelect:this.uploadOnSelect,deferredUploading:this.deferredUploading||0,selectMultipleFiles:this.multiple,id:this.id,isDebug:this.isDebug,noReturnCheck:this.skipServerCheck,serverTimeout:this.serverTimeout},params:{scale:"noscale",wmode:"transparent",wmode:"opaque",allowScriptAccess:"always",allowNetworking:"all"}}; this.flashObject=new _b(_17,this.inputNode); this.flashObject.onError=_8.hitch(function(msg){ console.error("Flash Error: "+msg); }); this.flashObject.onReady=_8.hitch(this,function(){ this.onReady(this); }); this.flashObject.onLoad=_8.hitch(this,function(mov){ this.flashMovie=mov; this.flashReady=true; this.onLoad(this); }); this._connectFlash(); }}); dojox.form.addUploaderPlugin(_c); return _c; });
PypiClean
/GithubStat-1.2.tar.gz/GithubStat-1.2/README.md
# GithubStat

**A Simple Github User Statistics Meter based on Github-API.**

[![Author](https://img.shields.io/badge/Author-HTR--TECH-blue)](https://github.com/htr-tech)
[![Opensource](https://img.shields.io/badge/Open%20Source-Yes-green)](#)
[![Version](https://badge.fury.io/py/GithubStat.svg)](https://badge.fury.io/py/GithubStat)
[![Python Version](https://img.shields.io/pypi/pyversions/GithubStat.svg)](https://pypi.org/project/GithubStat)
[![Total Downloads](https://pepy.tech/badge/GithubStat)](https://pepy.tech/project/GithubStat)
[![Monthly Downloads](https://pepy.tech/badge/GithubStat/month)](https://pepy.tech/project/GithubStat/month)

### Installation :

- **Install GithubStat via Pypi :**
```
$ pip install GithubStat
```

- Now simply type
```
$ GithubStat <github-username>
```

- **Use it via console :**
```python
>>> from GithubStat.__init__ import stats
>>> stats('htr-tech')

[-] Username : htr-tech
[-] Name : Tahmid Rayat
[-] Followers : 1969
[-] Following : 12
......
```

- **Install GithubStat via Github :**
```
$ git clone https://github.com/htr-tech/GithubStat.git
$ cd GithubStat
$ python setup.py install
$ GithubStat <github-username>
```

***Copyright (c) 2022 Tahmid Rayat under [MIT LICENSE](https://github.com/htr-tech/GithubStat/blob/master/LICENSE#L1)***

### *📡 Get in Touch :*

[![Github](https://img.shields.io/badge/Github-525252?style=for-the-badge&logo=github)](https://github.com/htr-tech)
[![Facebook](https://img.shields.io/badge/Facebook-3b5998?style=for-the-badge&logo=facebook)](https://fb.com/tahmid.rayat.official)
[![Instagram](https://img.shields.io/badge/Instagram-8a3ab9?style=for-the-badge&logo=instagram)](https://www.instagram.com/tahmid.rayat)
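The tool is described above as being based on the Github API. Purely as an illustration of the kind of request involved (this is **not** GithubStat's actual implementation), the same public fields can be fetched directly:

```python
# Illustrative sketch only -- not GithubStat's implementation.
import requests

user = requests.get("https://api.github.com/users/htr-tech", timeout=10).json()
print("[-] Username  :", user["login"])
print("[-] Followers :", user["followers"])
print("[-] Following :", user["following"])
```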
PypiClean
/Mathics-1.0.tar.gz/Mathics-1.0/mathics/web/media/js/mathjax/jax/output/SVG/fonts/TeX/Typewriter/Regular/BasicLatin.js
MathJax.Hub.Insert(MathJax.OutputJax.SVG.FONTDATA.FONTS.MathJax_Typewriter,{32:[0,0,250,0,0,""],33:[622,0,525,206,320,"206 565Q206 590 222 606T265 622Q287 621 303 606T319 565T314 392L308 216Q299 194 273 194H262Q247 194 241 195T228 200T217 216L211 392Q206 539 206 565ZM206 56Q206 83 223 99T265 115Q288 113 304 99T320 58Q320 33 303 17T262 0Q237 0 222 17T206 56"],34:[623,-333,525,122,402,"122 575Q122 593 137 608T173 623Q196 623 210 608T225 575Q225 562 218 464Q212 373 211 361T201 341Q193 333 173 333Q154 333 146 341Q138 348 137 360T129 464Q122 561 122 575ZM299 575Q299 593 314 608T350 623Q373 623 387 608T402 575Q402 562 395 464Q389 373 388 361T378 341Q370 333 350 333Q331 333 323 341Q315 348 314 360T306 464Q299 561 299 575"],35:[612,0,525,36,489,"93 163Q74 163 65 164T46 173T36 198Q36 210 40 215T61 233H131V236Q132 239 140 307T149 377Q149 379 105 379L61 380Q36 392 36 414Q36 450 86 450Q91 450 99 450T112 449H159Q163 480 167 517Q168 524 170 545T174 573T180 591T191 607T210 611Q223 611 232 604T243 588L245 580Q245 565 238 511T230 451Q230 449 282 449H333Q337 480 341 517Q342 524 343 537T345 556T348 573T352 589T359 600T370 608T384 611Q395 611 406 602T419 580Q419 565 412 511T404 451Q404 449 431 449H442Q477 449 485 429Q489 421 489 414Q489 392 463 380L428 379H394V376Q393 373 385 305T376 235Q376 233 419 233H463L468 230Q472 227 473 227T477 223T482 218T486 213T488 206T489 198Q489 162 436 162Q430 162 422 162T412 163H366V161Q364 159 357 92Q356 85 355 73T353 54T350 37T346 22T339 11T328 3T314 0Q303 0 292 9T279 31Q279 37 287 96T295 162Q295 163 244 163H192V161Q190 159 183 92Q182 85 181 73T179 54T176 37T172 22T165 11T154 3T140 0Q129 0 118 9T105 31Q105 37 113 96T121 162Q121 163 93 163ZM323 377Q323 379 272 379H220V376Q219 373 211 305T202 235Q202 233 253 233H305V236Q306 239 314 307T323 377"],36:[694,82,525,58,466,"415 397Q392 397 377 411T362 448Q362 464 376 485Q369 498 362 506T346 520T332 528T315 533T300 538V445L301 353L311 350Q382 334 424 284T466 174Q466 115 425 65T303 -2L300 -3V-30Q300 -64 291 -74Q283 -82 262 -82H255Q234 -82 225 -60L224 -32V-4L213 -2Q152 6 106 51T59 170V180Q59 197 74 213Q89 227 110 227T146 213T162 174Q162 156 147 137Q153 123 161 112T176 95T191 85T205 79T216 76T224 74V283L213 285Q147 298 103 343T58 449Q58 516 108 560T224 614V643V654Q224 666 226 673T237 687T264 694Q289 693 294 683T300 642V615H303Q355 607 390 587T440 540T460 493T466 453Q466 425 451 411T415 397ZM137 452Q137 425 158 404T198 376T223 369Q224 369 224 453T223 537Q198 532 168 509T137 452ZM301 75Q307 75 325 83T365 116T387 171Q387 238 300 267V171Q300 75 301 75"],37:[694,83,525,35,489,"35 560Q35 607 54 645T110 693Q111 693 116 693T125 694Q165 692 187 651T210 560Q210 506 186 467T123 428Q84 428 60 466T35 560ZM139 560Q139 574 136 587T130 608T124 615Q122 617 120 614Q106 595 106 561Q106 516 121 506Q123 504 125 507Q139 526 139 560ZM123 -83Q107 -83 98 -73T88 -48Q88 -43 89 -41Q90 -37 229 316T370 675Q381 694 400 694Q416 694 426 684T436 659Q436 654 435 652Q434 647 295 294T153 -65Q144 -83 123 -83ZM314 50Q314 104 338 143T400 183Q439 183 464 144T489 50T465 -43T402 -82Q358 -82 336 -41T314 50ZM417 50Q417 71 413 85T405 102L401 106Q386 95 386 50Q386 29 390 15T398 -2L402 -6Q417 5 417 50"],38:[622,11,525,28,490,"96 462Q96 546 132 584T211 622Q255 622 284 583T314 474Q314 395 224 305L208 288Q213 275 226 251L265 185L269 179Q273 184 299 246L332 333L342 363Q342 364 341 365Q334 365 334 393Q334 406 334 410T340 420T356 431H412H440Q467 431 478 424T490 393Q490 376 484 367T470 357T448 355H441H415L399 312Q349 176 322 127L315 115L323 106Q360 65 393 65Q405 65 410 80T416 109Q416 140 452 
140Q487 140 487 105Q487 56 460 23T391 -11L286 41L273 53L262 42Q212 -11 151 -11Q97 -11 63 33T28 143Q28 161 30 176T38 205T47 227T60 247T72 261T84 274T94 283L122 311L119 323Q96 392 96 462ZM243 474Q243 533 218 545L215 546Q212 546 210 546Q182 546 169 501Q167 492 167 466Q167 419 179 368L188 377Q234 425 242 461Q243 465 243 474ZM217 129Q185 174 154 235Q121 214 115 176Q113 168 113 143Q113 83 139 67Q141 66 152 66Q191 66 228 112L217 129"],39:[611,-287,525,175,349,"205 554Q205 577 221 594T263 611Q302 611 325 577T349 490Q349 409 298 347Q285 330 258 309T214 287Q203 289 189 302T175 327Q175 341 185 349T213 369T245 402Q269 437 273 483V497Q264 496 263 496Q240 496 223 513T205 554"],40:[694,82,525,166,437,"437 -53Q437 -82 399 -82H394Q377 -82 342 -55Q259 7 213 102T166 306Q166 412 211 507T342 667Q377 694 393 694H399Q437 694 437 665Q437 654 426 643T397 620T356 584T311 525Q301 511 290 488T264 412T250 306Q250 191 300 105T422 -27Q437 -37 437 -53"],41:[694,82,525,87,358,"87 664Q87 694 126 694Q138 694 147 690T183 667Q266 605 312 510T358 306Q358 193 307 93T161 -70Q142 -82 126 -82Q105 -82 96 -73T87 -53Q87 -47 88 -44Q92 -36 116 -19T173 34T230 119Q273 206 273 306Q273 408 231 494T109 635Q87 649 87 664"],42:[520,-89,525,68,456,"222 487Q224 501 235 510T262 520Q279 520 289 510T302 487Q302 458 301 429Q301 421 301 413T301 398T300 386T300 377V374Q300 373 301 373Q304 373 353 403T416 434Q432 434 444 423T456 393Q456 389 456 386T454 379T451 373T448 368T442 363T436 358T427 353T417 348T405 342T391 334Q345 309 339 305L388 279Q400 273 412 266T432 255T441 250Q456 238 456 218Q456 200 445 189T417 177Q403 177 354 207T301 238Q300 238 300 237V234Q300 231 300 226T300 214T301 199T301 182Q302 153 302 124Q300 109 289 100T262 90T235 100T222 124Q222 153 223 182Q223 190 223 198T223 213T224 225T224 234V237Q224 238 223 238Q220 238 171 208T108 177Q92 177 80 188T68 218Q68 237 79 246T134 277Q180 303 185 306L136 332Q124 338 112 345T92 356T83 361Q68 373 68 393Q68 411 79 422T107 434Q121 434 170 404T223 373Q224 373 224 374V377Q224 380 224 385T224 397T223 412T223 429Q222 458 222 487"],43:[531,-81,525,38,487,"147 271Q138 271 122 271T98 270Q68 270 53 277T38 306T53 335T98 342Q105 342 121 342T147 341H227V423L228 505Q241 531 262 531Q268 531 273 530T282 525T287 519T293 511L297 505V341H377H430Q457 341 467 338T483 321Q487 313 487 306Q487 295 480 286T463 273Q457 271 377 271H297V107Q281 81 262 81Q250 81 242 87T230 100L228 107L227 189V271H147"],44:[140,139,525,173,353,"193 37T193 70T213 121T260 140Q302 140 327 108T353 36Q353 -7 336 -43T294 -98T249 -128T215 -139Q204 -139 189 -125Q177 -111 174 -101Q172 -84 183 -77T217 -61T253 -33Q261 -24 272 1L265 0Q234 0 214 18"],45:[341,-271,525,57,468,"57 306Q57 333 86 341H438Q468 332 468 306T438 271H86Q57 280 57 306"],46:[140,-1,525,193,332,"193 70Q193 105 214 122T258 140Q291 140 311 120T332 70Q332 44 314 23T262 1Q234 1 214 18T193 70"],47:[694,83,525,58,466,"94 -83Q78 -83 68 -73T58 -48Q58 -44 60 -36Q62 -31 227 314T399 673Q410 694 431 694Q445 694 455 684T466 659Q466 656 464 648Q463 643 298 298T125 -62Q114 -83 94 -83"],48:[621,10,525,42,482,"42 305Q42 450 111 535T257 621Q335 621 390 562Q482 468 482 306Q482 174 418 82T262 -10T106 82T42 305ZM257 545Q209 545 168 481T126 320Q126 220 162 147Q204 65 262 65Q318 65 358 139T398 320V328Q395 411 364 470T284 543Q270 545 257 545"],49:[622,-1,525,99,450,"99 461Q99 470 99 474T104 487T120 498T151 502Q213 517 251 596Q264 622 283 622Q308 622 319 597V76H373H401Q428 76 439 69T450 38Q450 11 428 1H127Q104 10 104 38Q104 62 115 69T153 76H181H235V269Q235 461 234 461Q184 426 137 424H133Q124 424 119 425T109 
431T99 447V461"],50:[622,-1,525,52,472,"52 462Q52 528 110 575T247 622H250Q343 622 407 565T472 421Q472 371 446 324T390 248T308 178Q307 177 275 151T214 101L185 77Q185 76 286 76H388V87Q388 105 397 114T430 123T463 114Q470 107 471 100T472 61V42Q472 24 468 16T450 1H75Q53 10 53 32V38V48Q53 57 63 67T127 122Q153 144 169 157L289 256Q388 345 388 419Q388 473 346 509T231 545H224Q176 545 146 499L144 494Q155 476 155 459Q154 459 155 455T154 444T148 430T136 417T114 408Q113 408 110 408T104 407Q80 407 66 422T52 462"],51:[622,11,525,44,479,"260 546Q233 546 211 541T180 531T171 524L174 514Q177 505 177 497Q177 476 162 461T125 446Q106 446 90 459T73 504Q76 540 98 565T150 601T203 616T239 621Q241 622 265 622Q322 620 362 602T420 558T444 513T451 478Q451 386 369 329L375 326Q381 323 386 320T401 311T419 298T436 283T452 263T466 240T475 212T479 180Q479 99 416 44T259 -11T105 28T44 130Q44 154 59 168T95 183Q117 183 132 169T148 131Q148 119 139 101Q175 65 260 65Q316 65 355 97T395 179Q395 211 375 240Q336 292 253 292H234H215Q194 292 185 299T175 330Q175 350 184 359Q192 368 238 370T309 384Q336 398 351 423T367 474Q367 496 350 513Q321 546 260 546"],52:[623,-1,525,29,495,"235 1Q213 10 213 32V38V46Q213 65 230 73Q236 76 274 76H314V168H183L52 169Q37 175 33 182T29 205V218L30 244Q53 283 155 443T264 613Q276 623 298 623H323H363Q378 616 385 601V244H429H450Q474 244 484 237T495 206Q495 179 477 171Q471 168 429 168H385V76H425H442Q466 76 476 69T487 38Q487 10 465 1H235ZM314 244V554L117 245L215 244H314"],53:[612,10,525,52,472,"387 189Q387 244 354 278T273 313Q230 313 205 301T163 271T138 249H120Q102 249 97 251Q85 258 83 266T80 311Q80 320 80 359T81 430Q81 587 82 591Q88 605 103 610H108Q112 610 120 610T138 610T163 610T192 611T225 611T260 611H415Q416 610 421 607T428 602T432 596T436 587T437 573Q437 567 437 562T434 554T431 548T427 543T423 540T418 538L415 536L289 535H164V363L170 366Q175 368 184 372T207 380T238 386T276 389Q357 389 414 331T472 187Q472 116 412 53T245 -10Q218 -10 209 -9Q126 5 89 48T52 137Q52 164 68 177T104 191Q130 191 143 175T156 141Q156 132 154 125T149 113T146 107Q146 104 155 95T188 76T245 65Q298 65 342 98T387 189"],54:[622,11,525,44,479,"357 536Q357 546 318 546Q258 546 205 497T133 357V353L144 361Q210 402 285 402Q362 402 414 350Q479 285 479 193Q479 111 418 50T263 -11Q234 -11 207 -3T149 26T97 81T60 171T45 301Q45 444 129 533T319 622Q388 622 421 589T454 510Q454 491 442 475T402 458Q373 458 362 475T350 510Q350 520 354 528L357 536ZM319 326T269 326T179 298T136 223Q136 202 143 174T176 112T237 68Q246 66 265 66Q319 66 360 107Q395 146 395 197Q395 250 356 289Q319 326 269 326"],55:[627,10,525,44,480,"204 -10Q162 -10 162 40Q162 146 198 261T310 477Q311 478 321 491T342 517T358 535H128V524Q128 506 119 497Q111 489 86 489H78Q55 489 46 508Q44 513 44 557V580Q44 605 52 616T88 627H93Q114 627 125 611H458Q474 598 477 593T480 573Q480 559 478 553T469 543T446 521T408 477Q252 290 246 49Q246 43 246 37T246 27T245 22Q243 11 233 1T204 -10"],56:[621,10,525,45,480,"58 460Q58 523 117 572T254 621Q290 621 298 620Q376 607 421 560T466 460Q466 441 460 424T443 393T421 370T397 352T374 340T357 332L350 330L356 328Q363 325 371 321T392 310T415 295T439 274T459 249T473 217T479 179Q479 102 418 46T262 -10T106 46T45 179Q45 202 52 222T70 257T96 284T123 305T148 319T167 328L174 330L170 332Q166 333 159 336T145 343Q104 362 81 393T58 460ZM382 458Q382 491 349 518T263 546Q215 546 179 521T142 458Q142 421 178 395T262 368Q315 368 348 396T382 458ZM396 178Q396 223 358 257T263 292Q206 292 167 258T128 178Q128 137 163 102T262 66Q324 66 360 101T396 178"],57:[622,11,525,46,479,"392 259Q333 210 236 210H233Q163 210 
109 262Q46 325 46 411T99 550Q164 622 264 622Q293 622 319 615T376 587T428 532T464 440T479 304Q479 167 400 78T217 -11Q140 -11 105 22T70 101Q70 124 84 138T122 153Q150 153 162 137T174 101Q174 91 168 76Q179 65 216 65Q267 65 300 93Q322 109 339 130T366 173T380 210T388 242T392 259ZM388 389Q388 438 357 492T268 546T185 520Q129 479 129 415Q129 384 138 363Q145 349 156 334T195 302T255 285Q305 285 345 313T388 389"],58:[431,-1,525,193,332,"193 361Q193 396 214 413T258 431Q291 431 311 411T332 361Q332 335 314 314T262 292Q234 292 214 309T193 361ZM193 70Q193 105 214 122T258 140Q291 140 311 120T332 70Q332 44 314 23T262 1Q234 1 214 18T193 70"],59:[431,139,525,175,337,"193 361Q193 396 214 413T258 431Q291 431 311 411T332 361Q332 335 314 314T262 292Q234 292 214 309T193 361ZM193 70Q193 105 214 122T259 140Q301 140 319 108T337 33Q337 -38 291 -88T214 -139Q203 -139 189 -126T175 -97Q175 -85 182 -78T200 -66T225 -50T249 -17Q256 -3 256 0Q252 1 248 1Q242 2 235 5T218 15T200 36T193 70"],60:[557,-55,525,57,469,"468 90Q468 76 458 66T433 55Q426 55 419 58Q413 61 243 168T68 280Q57 291 57 306T68 332Q72 335 241 442T416 553Q424 557 432 557Q447 557 457 547T468 522T456 496Q454 494 305 399L158 306L305 213Q341 190 390 159Q443 125 452 119T464 106V105Q468 97 468 90"],61:[417,-195,525,38,487,"38 382Q38 409 67 417H457Q487 408 487 382Q487 358 461 348H64Q51 352 45 360T38 376V382ZM67 195Q38 204 38 230Q38 255 62 264Q66 265 264 265H461L464 264Q467 262 469 261T475 256T481 249T485 240T487 230Q487 204 457 195H67"],62:[557,-55,525,57,468,"57 522Q57 539 67 548T90 557Q98 557 105 554Q111 551 281 444T456 332Q468 320 468 306T456 280Q452 276 282 169T105 58Q98 55 91 55Q79 55 68 63T57 90Q57 105 68 116Q70 118 219 213L366 306L219 399Q75 491 71 494Q57 507 57 522"],63:[617,1,525,62,462,"62 493Q62 540 107 578T253 617Q366 617 414 578T462 490Q462 459 445 434T411 400L394 390Q315 347 296 287Q294 278 293 247V217Q285 201 278 198T246 194T216 197T201 215V245V253Q201 379 351 456Q366 464 375 477Q377 482 377 490Q377 517 339 528T251 540Q182 540 159 517Q166 503 166 490Q166 468 151 453T114 438Q96 438 79 451T62 493ZM190 58Q190 85 208 100T249 115Q272 113 288 99T304 58Q304 33 287 17T246 0T206 16T190 58"],64:[617,6,525,44,481,"44 306Q44 445 125 531T302 617Q332 617 358 607T411 574T456 502T479 387Q481 361 481 321Q481 203 421 143Q381 103 332 103Q266 103 225 165T183 307Q183 390 227 449T332 508Q358 508 378 498Q350 541 304 541Q229 541 172 473T115 305Q115 208 171 140T306 71H310Q358 71 397 105Q409 115 436 115Q458 115 462 113Q481 106 481 86Q481 73 468 61Q401 -6 305 -6Q262 -6 217 14T133 71T69 170T44 306ZM410 306Q410 361 386 396T333 431Q300 431 277 394T254 305Q254 256 276 218T332 180Q364 180 387 217T410 306"],65:[623,-1,525,28,496,"191 76Q212 75 220 68T229 38Q229 10 208 1H129H80Q48 1 38 7T28 38Q28 51 29 57T40 69T70 76Q89 76 89 78Q90 79 117 205T173 461T205 599Q212 623 250 623H262H273Q312 623 319 599Q322 591 350 461T406 205T435 78Q435 76 454 76H458Q484 76 493 59Q496 53 496 38Q496 11 478 3Q474 1 395 1H317Q295 8 295 38Q295 65 311 73Q316 75 333 76L348 77V78Q348 80 341 112L334 143H190L183 112Q176 80 176 78Q175 76 178 76Q180 76 191 76ZM318 221Q313 238 288 366T263 519Q263 526 262 527Q261 527 261 520Q261 493 236 365T206 221Q206 219 262 219T318 221"],66:[611,-1,525,17,482,"39 1Q17 10 17 32V38V46Q17 65 34 73Q40 76 61 76H84V535H61H54Q27 535 19 553Q17 557 17 573Q17 583 17 587T23 599T39 610Q40 611 179 611Q320 610 332 607Q332 607 339 605Q394 591 427 547T461 454Q461 413 436 378T369 325L358 320Q405 311 443 270T482 169Q482 112 445 64T345 3L334 1H39ZM309 533Q302 535 234 535H168V356H230Q284 357 296 358T323 
368Q346 380 361 402T377 452Q377 482 358 505T309 533ZM398 176Q396 218 371 246T315 279Q310 280 237 280H168V76H239Q316 77 327 81Q329 82 334 84Q398 107 398 176"],67:[622,11,525,40,485,"40 305Q40 437 110 529T281 622Q315 622 343 611T387 589T404 578Q409 585 415 596T425 611T435 618T452 622Q472 622 478 609T485 566Q485 559 485 540T484 508V460Q484 413 478 403T442 393Q417 393 409 402Q400 409 400 420Q400 428 395 445T380 487T347 528T295 546Q235 546 180 483T124 306Q124 245 141 197T186 121T241 80T296 66Q346 66 373 103T400 178Q400 209 435 209H442H450Q484 209 484 172Q480 96 421 43T281 -11Q177 -11 109 84T40 305"],68:[612,-1,525,16,485,"38 1Q16 8 16 38Q16 62 32 73Q39 76 58 76H78V535H58Q40 535 32 538Q16 548 16 573Q16 587 17 591Q23 604 34 607T83 611H166H176Q188 611 209 611T239 612Q299 612 337 597T415 530Q485 438 485 300Q485 180 431 100T301 3L291 1H38ZM400 301Q400 363 385 410T346 482T303 519T267 534Q261 535 210 535H162V76H214L267 77Q323 89 361 148T400 301"],69:[612,-1,525,18,502,"374 271Q374 241 367 232T332 223Q307 223 299 231Q290 240 290 263V279H173V76H418V118V144Q418 167 426 176T460 186Q491 186 500 166Q502 161 502 93V52Q502 25 499 17T480 1H41Q19 9 19 32V38Q19 63 36 73Q42 76 65 76H89V535H65H55Q44 535 38 537T25 548T19 573Q19 602 41 610H47Q53 610 63 610T88 610T121 610T160 611T204 611T251 611H458Q460 609 465 606T471 602T475 598T478 593T479 586T480 576T480 562V526V488Q480 452 462 444Q458 442 438 442Q413 442 405 450Q398 457 397 463T396 501V535H173V355H290V371Q290 394 299 403T332 412Q363 412 372 392Q374 387 374 317V271"],70:[612,-1,525,22,490,"384 260Q384 230 377 221T342 212Q317 212 309 220Q300 229 300 252V268H179V76H249Q264 67 267 61T271 38Q271 10 249 1H44Q22 9 22 32V38Q22 63 39 73Q45 76 69 76H95V535H69H59Q42 535 32 542T22 573Q22 602 44 610H50Q56 610 66 610T91 610T125 610T164 611T208 611T257 611H468Q470 609 475 606T481 602T485 598T488 593T489 586T490 576T490 562V526V488Q490 452 472 444Q468 442 448 442Q423 442 415 450Q408 457 407 463T406 501V535H179V344H300V360Q300 383 309 392T342 401Q373 401 382 381Q384 376 384 306V260"],71:[623,11,525,38,496,"38 306Q38 447 105 534T261 622Q280 622 298 618T329 608T350 596T366 585L371 581Q373 581 377 591T390 612T417 622Q437 622 443 609T450 566Q450 559 450 540T449 508V460Q449 413 443 403T407 393Q392 393 386 394T373 402T364 426Q360 472 335 509T271 546Q214 546 168 477T121 308Q121 210 164 138T271 65Q293 65 310 78T337 109T352 147T360 180T362 195Q362 196 333 196L304 197Q282 204 282 227V234Q282 247 282 251T288 261T304 272H474Q488 263 492 256T496 234Q496 211 479 199Q475 197 461 196H449V21Q441 6 434 3T412 -1H407H402Q385 -1 379 3T364 28Q350 14 322 2T260 -11Q173 -11 106 76T38 306"],72:[611,-1,525,16,508,"16 571Q16 597 27 604T74 611H125H208Q223 602 226 596T230 573Q230 559 227 551T217 540T204 536T186 535H165V356H359V535H338H333Q306 535 297 552Q295 556 295 573Q295 586 295 590T301 600T317 611H486Q501 602 504 596T508 573Q508 559 505 551T495 540T482 536T464 535H443V76H464H470Q482 76 489 75T502 64T508 38Q508 10 486 1H317Q306 5 301 11T296 21T295 38V44Q295 66 311 73Q318 76 338 76H359V280H165V76H186H192Q204 76 211 75T224 64T230 38Q230 10 208 1H39Q28 5 23 11T18 21T17 38V44Q17 66 33 73Q40 76 60 76H81V535H60Q45 535 38 536T24 545T16 571"],73:[611,-1,525,72,452,"400 76Q431 76 441 69T452 38Q452 29 452 26T450 18T443 9T430 1H95Q84 6 79 12T73 23T72 38Q72 65 90 73Q96 76 157 76H220V535H157H124Q93 535 83 542T72 573Q72 603 93 610Q97 611 264 611H430Q432 609 436 607T444 602T449 594Q452 588 452 573Q452 546 434 538Q428 535 367 535H304V76H367H400"],74:[612,11,525,57,479,"202 543T202 573T224 610H228Q231 610 237 610T251 
610T269 610T291 611T315 611T342 611H457Q471 602 475 595T479 573Q479 549 462 538Q454 535 432 535H408V328Q408 159 408 133T402 93Q386 48 340 19T229 -11Q158 -11 108 16T57 100Q57 129 73 141T108 154Q128 154 143 140T159 102Q159 93 155 79Q188 65 228 65H230Q290 65 318 106Q323 115 323 139T324 329V535H274L224 536Q202 543 202 573"],75:[611,-1,525,18,495,"18 549T18 573T29 604T70 611H118H193Q207 603 210 596T214 573Q214 549 198 538Q191 535 172 535H152V421Q152 344 152 326T153 309L242 422L329 534Q327 535 322 536T314 538T308 542T303 548T300 558T298 573Q298 600 316 608Q322 611 392 611H463Q477 602 481 595T485 573Q485 535 446 535H441H420L281 357L436 77L454 76Q473 75 478 73Q495 62 495 38Q495 10 473 1H345Q334 5 329 11T324 21T323 38Q323 51 324 56T332 68T355 77L233 296L152 192V76H172Q191 76 198 73Q214 63 214 38Q214 9 193 1H41Q18 8 18 38Q18 61 35 73Q42 76 61 76H81V535H61Q42 535 35 538Q18 549 18 573"],76:[611,0,525,25,488,"27 594Q34 605 43 608T84 611H154H213Q258 611 269 605T281 573Q281 546 263 538Q257 535 222 535H185V76H404V118V145Q404 168 411 177T446 186H453Q478 186 486 167Q488 161 488 93V50Q488 24 485 17T466 1L258 0H147H99Q47 0 36 6T25 38Q25 59 35 69Q44 76 76 76H101V535H76H64Q36 535 27 552Q25 557 25 573T27 594"],77:[611,-1,525,11,512,"50 535Q37 536 31 537T18 547T12 573Q12 598 22 604T62 611H91H121Q147 611 158 607T178 587Q183 579 222 446T261 293Q261 289 262 288Q263 288 263 292Q263 311 298 434T346 588Q353 603 365 607T402 611H435H450Q488 611 500 605T512 573Q512 556 506 547T493 537T474 535H459V76H474Q487 75 493 74T505 64T512 38Q512 11 494 3Q490 1 424 1H386Q355 1 345 7T335 38Q335 55 341 64T354 74T373 76H388V302Q388 512 387 519Q382 482 346 359T304 228Q292 204 262 204T220 228Q215 237 179 359T137 519Q136 512 136 302V76H151Q164 75 170 74T182 64T189 38Q189 11 171 3Q167 1 101 1H63Q32 1 22 7T12 38Q12 55 18 64T31 74T50 76H65V535H50"],78:[611,0,525,20,504,"20 571Q20 598 30 604T73 611H105H136Q152 611 160 611T177 607T189 601T198 587T206 568T217 537T231 497Q354 142 365 95L368 84V535H347H342Q314 535 306 552Q304 556 304 573Q304 586 304 590T310 600T326 611H482Q497 602 500 596T504 573Q504 559 501 551T491 540T478 536T460 535H439V25Q432 7 424 4T389 0H374Q334 0 322 31L293 115Q171 468 159 517L156 528V76H177H183Q195 76 202 75T215 64T221 38Q221 10 199 1H43Q32 5 27 11T22 21T21 38V44Q21 66 37 73Q44 76 64 76H85V535H64Q49 535 42 536T28 545T20 571"],79:[621,10,525,56,468,"102 588Q140 621 240 621Q323 621 335 620Q393 613 422 588Q450 560 459 493T468 306Q468 185 460 118T422 23Q382 -10 289 -10H262H235Q142 -10 102 23Q74 50 65 118T56 306Q56 427 64 494T102 588ZM363 513Q357 523 347 530T324 540T302 544T280 546H268Q192 546 167 521Q150 501 145 452T140 300Q140 235 142 197T151 130T172 89T207 71T262 65Q317 65 341 81T374 144T384 300Q384 474 363 513"],80:[612,-1,525,19,480,"41 1Q19 9 19 32V38Q19 63 36 73Q42 76 65 76H89V535H65H55Q38 535 29 543T19 576Q19 603 41 610H49Q57 610 70 610T100 610T136 611T175 611Q190 611 216 611T255 612Q321 612 363 598T441 537Q480 486 480 427V421Q480 354 447 311T378 251Q339 230 275 230H239H173V76H197Q220 76 227 73Q244 62 244 38Q244 10 222 1H41ZM396 421Q396 461 369 491T300 533Q294 534 233 535H173V306H233Q294 307 300 308Q345 319 370 352T396 421"],81:[622,138,525,56,468,"56 306Q56 380 58 426T68 510T87 568T120 600T170 617T240 621Q323 621 335 620Q393 613 422 588Q450 560 459 493T468 306Q468 124 447 66Q433 23 394 6L424 -53Q454 -112 454 -118Q454 -128 441 -138H377Q367 -135 363 -129T333 -69L304 -11H254Q205 -10 180 -8T128 6T91 36T70 92T58 178T56 306ZM227 151Q227 171 262 171H276H281Q292 171 296 171T305 170T313 165T317 158T323 145T332 127L353 88Q356 88 
361 95T372 131T382 202Q384 228 384 306Q384 452 371 492T304 544Q296 545 251 545Q230 545 215 543T188 534T169 520T155 497T147 466T143 423T141 371T140 306Q140 248 141 217T146 154T157 109T178 83T212 68T262 65H266L264 70Q261 75 256 85T247 105Q227 145 227 151"],82:[612,11,525,16,522,"16 571Q16 598 27 605T76 612Q84 612 108 612T148 611Q268 611 294 605Q346 592 389 550T432 440Q432 394 410 359Q393 329 366 310L358 303Q387 273 399 239Q405 219 405 178T408 106T421 68Q426 65 428 65Q433 65 435 74T438 96T441 112Q450 130 480 130H485Q519 130 522 100Q522 79 516 56T488 11T434 -11Q421 -11 408 -8T377 5T344 37T324 93Q322 101 322 154L321 209Q304 257 257 267Q252 268 207 268H165V76H186H192Q204 76 211 75T224 64T230 38Q230 10 208 1H39Q28 5 23 11T18 21T17 38V44Q17 66 33 73Q40 76 60 76H81V535H60Q45 535 38 536T24 545T16 571ZM348 440Q348 478 321 502T260 532Q252 534 208 535H165V344H208Q212 344 223 344T239 345T252 346T266 348T278 351T293 358Q348 387 348 440"],83:[622,11,525,51,472,"52 454Q52 524 107 572T229 621Q266 621 274 620Q326 610 360 588L371 581Q377 594 379 598T386 610T397 619T412 622Q433 622 439 610T446 570Q446 563 446 545T445 515V479Q445 441 444 432T436 417Q428 408 403 408T370 417Q361 424 361 434Q361 439 360 448T351 476T331 509T295 535T238 546Q194 546 163 522T132 458Q132 435 148 412Q155 401 166 393T192 380T218 371T247 364T270 359Q341 342 349 339Q389 325 418 296T461 229Q472 201 472 164Q469 92 417 41T287 -11Q240 -11 200 -1T143 19L126 29Q117 6 109 -2Q100 -11 84 -11Q64 -11 58 1T51 42Q51 49 51 66T52 95V135Q52 173 53 180T61 194Q70 203 95 203Q119 203 127 194Q136 186 136 168Q143 66 284 66H290Q325 66 350 85Q391 115 391 165Q391 204 369 228T322 260Q320 260 255 275T185 293Q123 309 88 355T52 454"],84:[612,-1,525,26,498,"129 38Q129 51 129 55T135 65T151 76H220V535H110V501Q110 470 109 464T101 450Q93 442 68 442H60Q37 442 28 461Q26 466 26 527L27 589Q36 607 49 610H55Q61 610 72 610T97 610T131 610T170 611T215 611T264 611H476Q478 609 483 606T489 602T493 598T496 593T497 586T498 576T498 562V526V488Q498 452 480 444Q476 442 456 442Q431 442 423 450Q416 457 415 463T414 501V535H304V76H374Q389 67 392 61T396 38Q396 10 374 1H151Q140 5 135 11T130 21T129 38"],85:[612,11,525,-4,528,"-3 573Q-3 597 8 604T50 612Q57 612 77 612T111 611H200Q214 602 218 595T222 573Q222 549 205 538Q198 535 175 535H151V359Q151 333 151 291Q152 177 156 162Q157 160 157 159Q165 123 193 95T262 66Q303 66 330 94T367 159Q371 175 371 191T373 359V535H349H339Q328 535 322 537T309 548T303 573T306 595T325 611H506Q520 602 524 595T528 573Q528 549 511 538Q504 535 481 535H457V364Q457 189 456 182Q448 101 394 45T262 -11Q189 -11 132 43T68 182Q67 189 67 364V535H43H33Q22 535 16 537T3 548T-3 573"],86:[613,7,525,19,505,"19 578Q19 585 20 590T23 598T29 604T38 608T48 610T62 611T78 612T97 611T119 611H195Q210 602 213 596T217 573Q217 561 216 555T206 542T179 535H164Q166 529 188 435T235 231T261 94L262 84V88Q263 91 263 94Q265 121 289 231T336 438L360 535H345Q308 535 308 566V573Q308 586 308 590T314 600T330 611H484Q499 602 502 595T505 573Q505 560 504 554T493 541T465 535H447L384 278Q321 19 319 14Q309 -7 278 -7H262H246Q215 -7 205 14Q203 19 140 278L78 535H59Q45 535 38 536T25 547T19 573V578"],87:[611,7,525,12,512,"459 611Q491 611 501 605T512 573Q512 538 482 535H474L439 276Q406 26 402 11Q398 2 389 -3Q387 -3 386 -4L380 -7H359H349Q324 -7 313 13Q307 29 285 139T263 275Q263 283 262 283Q261 282 261 274Q261 248 239 137T211 13Q200 -7 175 -7H165H144Q136 -3 127 3Q121 10 117 36T85 276L50 535H42Q26 536 19 545T12 564V573Q12 603 33 610Q37 611 101 611H134Q165 611 175 604T186 573Q186 563 186 559T182 547T169 538T143 535H122V531Q124 517 
133 446T155 266T172 96V84L173 102Q176 157 192 243T215 346Q227 367 259 367H262H265Q297 367 309 346Q316 329 332 243T351 102L352 84V96Q356 161 368 266T390 444T402 531V535H381Q366 535 359 536T345 547T338 573Q338 600 356 608Q362 611 425 611H459"],88:[611,-1,525,28,495,"39 571Q39 597 49 604T93 611H141H218Q233 602 236 595T239 573Q239 538 210 535Q202 535 202 534T215 507T243 454L257 428L307 535H298Q266 538 266 573Q266 584 267 588T273 598T289 611H366H401Q442 611 454 605T466 573Q466 546 448 538Q442 535 421 535H398L299 327Q299 323 362 201L426 77L449 76Q467 76 475 75T489 65T495 38Q495 11 477 3Q473 1 395 1H317Q295 8 295 38Q295 73 325 76L334 77Q333 78 314 117T276 196L257 235L239 196Q221 157 204 118T186 77Q190 76 196 76Q211 74 218 67T227 55T228 38Q228 28 227 24T221 13T206 1H50Q28 9 28 32V38Q28 63 45 73Q51 76 73 76H96L214 324Q215 327 162 431L108 535H85H79Q67 535 60 536T46 546T39 571"],89:[611,-1,525,20,505,"20 573Q20 597 30 604T72 611H121H198Q212 602 216 595T220 573Q220 568 219 563T217 555T214 549T211 544T207 541T203 538T198 537T194 536T190 536L188 535Q179 535 179 534L188 516Q196 497 208 470T232 415T252 363T261 332Q261 329 262 329T263 332Q263 354 333 508L345 534Q345 535 336 535Q305 538 305 567V573Q305 589 308 595T327 611H483Q505 598 505 573Q505 549 488 538Q481 535 460 535H438L304 245V76H325H331Q343 76 350 75T363 64T369 38Q369 10 347 1H178Q167 5 162 11T157 21T156 38V44Q156 66 172 73Q180 76 199 76H220V245L86 535H64Q44 535 36 538Q20 548 20 573"],90:[612,-1,525,48,481,"71 1Q60 5 55 11T49 23T48 39V46Q48 56 58 73T131 183Q171 242 197 282L366 535H144V501Q144 470 143 464T135 450Q127 442 102 442H94Q71 442 62 461Q60 466 60 527L61 589Q70 607 83 610H88Q93 610 102 610T124 610T154 610T188 611T227 611T270 611H454Q456 609 461 606T467 601T471 597T474 591T475 584T476 572V565Q476 555 466 538T393 428Q353 369 327 329L158 76H397V120V146Q397 169 405 179T439 189Q470 189 479 169Q481 164 481 95V48Q481 24 478 16T459 1H71"],91:[694,82,525,214,484,"237 -82Q221 -78 214 -58V305Q214 669 216 673Q220 687 231 690T278 694H350H461Q462 693 467 690T474 685T478 679T482 670T483 656Q483 632 471 625T428 617Q422 617 406 617T379 618H298V-7H379H420Q459 -7 471 -13T483 -45Q483 -55 483 -59T477 -70T461 -82H237"],92:[694,83,525,58,466,"58 659Q58 673 68 683T93 694Q114 694 125 673Q132 659 297 314T464 -36Q466 -44 466 -48Q466 -66 454 -74T431 -83Q410 -83 399 -62Q391 -47 226 298T60 648Q58 656 58 659"],93:[695,82,525,41,310,"41 656Q41 681 53 688T99 695Q107 695 133 695T177 694H288Q307 681 310 669V-58Q303 -76 288 -82H64Q41 -73 41 -45Q41 -21 53 -14T96 -6Q102 -6 118 -6T145 -7H226V618H145H100Q67 618 54 625T41 656"],94:[611,-460,525,96,428,"138 460Q121 460 109 479T96 512Q96 527 106 534Q109 536 178 571T253 609Q256 611 264 611Q272 610 343 574Q357 567 369 561T389 550T402 543T411 538T416 535T420 532T422 529T425 525Q428 518 428 512Q428 498 416 479T386 460H384Q377 460 316 496L262 526L208 496Q147 460 138 460"],95:[-25,95,525,57,468,"57 -60Q57 -33 86 -25H438Q468 -34 468 -60T438 -95H86Q57 -86 57 -60"],96:[681,-357,525,176,350,"176 479Q176 563 227 622T310 681Q324 680 337 667T350 641Q350 627 340 619T312 599T280 566Q256 531 252 485V471Q261 472 262 472Q285 472 302 455T320 414Q320 389 303 373T261 357Q223 357 200 391T176 479"],97:[439,6,525,48,524,"126 306Q105 306 90 321T74 359Q74 439 211 439Q268 439 276 438Q343 426 383 390T430 306Q431 301 431 190V81Q446 79 465 78T492 76T509 72T521 60T524 38Q524 11 506 3Q502 1 466 1Q426 1 406 5T379 14T355 36L345 30Q284 -6 205 -6Q135 -6 92 39T48 141Q48 182 79 212T158 256T252 278T342 285H347V290Q347 315 325 335T267 362Q258 363 224 363Q189 363 185 
362H179L178 358Q178 353 178 352T176 345T174 337T170 330T165 322T158 316T150 311T139 308T126 306ZM132 140Q132 115 157 93T224 70Q269 70 302 87T344 133Q346 139 347 175V211H339Q256 209 194 190T132 140"],98:[611,6,525,4,492,"4 573Q4 596 15 603T52 611H90H124Q146 611 155 608T171 591Q173 586 173 491V396L182 402Q217 424 256 431Q280 437 309 437Q376 437 434 379T492 217Q492 162 473 118T422 47T358 8T293 -6Q229 -6 174 38Q171 13 163 7T135 1H131H122Q99 1 90 23L89 279V535H58L27 536Q4 543 4 573ZM409 215Q409 269 377 315T283 361Q255 361 224 344T177 297L173 290V167Q189 124 213 97T278 70Q330 70 369 111T409 215"],99:[440,6,525,66,466,"291 -6Q196 -6 131 60T66 216Q66 296 119 361Q154 403 200 421T273 439Q275 440 293 440H313Q400 440 433 409Q454 388 454 359Q454 335 439 321T402 306Q380 306 365 321T350 357V362L340 363Q339 363 326 363T303 364Q280 364 266 362Q217 352 184 313T151 215Q151 153 199 112T313 70Q341 70 357 85T381 118T394 140Q402 146 424 146Q443 146 447 144Q466 137 466 117Q466 106 457 88T429 47T374 10T291 -6"],100:[611,6,525,31,520,"266 573Q266 596 277 603T314 611H352H385Q411 611 419 607T435 586V76H498Q512 67 516 60T520 38Q520 9 498 1H436Q429 1 417 1T398 0Q375 0 363 7T351 34V43L342 36Q288 -6 223 -6Q143 -6 87 58T31 216Q31 307 88 372T230 437Q292 437 342 405L351 399V535H320L289 536Q266 543 266 573ZM351 290Q347 302 337 316T302 346T244 361Q193 361 154 319T115 215Q115 152 152 111T235 70Q314 70 351 170V290"],101:[440,6,525,48,465,"48 217Q48 295 100 361T248 439L258 440Q268 440 274 440Q329 438 369 416T428 359T456 292T464 228Q464 215 461 208T454 198T442 190L288 189H135L138 179Q153 132 199 102T303 71Q336 71 353 86T380 120T398 143Q404 146 422 146Q453 146 462 126Q464 120 464 116Q464 84 416 39T285 -6Q187 -6 118 59T48 217ZM377 264Q371 291 365 306T341 338T294 362Q288 363 264 363Q225 363 190 336T139 264H377"],102:[617,-1,525,35,437,"43 395Q44 405 44 408T47 416T53 423T66 431H176V461Q176 500 182 518Q201 570 252 593T353 617Q399 614 418 593T437 548Q437 528 424 514T387 499Q365 499 353 511T338 537V541H328Q275 536 261 494Q260 490 260 460V431H327Q334 431 346 431T364 432Q392 432 404 425T416 393T405 362T365 355H327H260V76H319Q375 76 388 71T401 38Q401 27 400 23T395 12T379 1H58Q47 6 42 12T36 23T35 38Q35 65 53 73Q59 76 117 76H176V355H121H93Q64 355 54 362T43 395"],103:[442,229,525,28,510,"60 274Q60 337 107 386T233 436Q278 436 316 417L329 410L338 416Q384 442 427 442T489 423T509 381T494 345T460 332Q449 332 440 338Q432 341 427 348T419 360T415 365Q414 364 410 364L383 355Q406 320 406 274Q406 211 358 162T233 112Q189 112 155 128L146 133Q142 125 142 115Q142 99 150 85T175 71Q182 72 187 70Q188 70 195 70T218 70T254 69Q259 69 275 69T297 69T318 68T340 66T361 62T384 57T405 49T428 38Q495 -1 495 -76Q495 -143 427 -186T262 -229Q161 -229 94 -185T29 -73Q30 -60 33 -48T39 -26T47 -8T57 8T67 20T77 30T86 38L91 43Q91 44 86 53T75 80T70 117Q70 142 89 183L83 194Q60 232 60 274ZM321 274Q321 312 296 337T230 362Q197 362 171 338T145 274Q145 235 170 211T233 187Q273 187 297 212T321 274ZM422 -78Q422 -54 408 -38T366 -15T315 -6T255 -4H200Q198 -4 193 -4T183 -3Q148 -3 125 -26T102 -78Q102 -110 151 -132T261 -154Q321 -154 371 -132T422 -78"],104:[611,-1,525,4,520,"4 573Q4 596 15 603T52 611H90H124Q146 611 155 608T171 591Q173 586 173 489Q173 394 175 394L186 402Q197 410 219 420T269 434Q278 436 306 436Q343 436 371 423Q411 402 423 365T436 265Q436 257 436 239T435 211V198V76H498Q512 67 516 60T520 38Q520 9 498 1H308Q286 10 286 32V38V46Q286 65 303 73Q309 76 329 76H351V188Q351 204 351 230T352 266Q352 321 341 341T288 361Q253 361 222 341T176 274L174 264L173 170V76H236Q250 67 254 60T258 38Q258 9 236 
1H27Q4 8 4 38Q4 53 8 60T27 76H89V535H58L27 536Q4 543 4 573"],105:[612,-1,525,72,462,"202 538T202 559T218 596T260 612Q283 612 300 597T317 560Q317 538 300 523T260 507Q235 507 219 522ZM411 76Q441 76 451 69T462 38Q462 29 462 26T460 18T453 9T440 1H94Q72 8 72 33V38Q72 46 72 49T74 58T81 68T94 76H233V355H167L102 356Q80 363 80 393Q80 418 91 425T138 432Q145 432 165 432T200 431H295Q297 429 303 425T310 420T314 415T317 404T317 389T318 363Q318 354 318 314T317 241V76H378H411"],106:[612,228,525,48,377,"261 559Q261 580 277 596T319 612Q342 612 359 597T376 560T360 523T320 507Q296 507 279 523T261 559ZM75 -91T100 -91T138 -107T152 -144V-150L160 -151H193H203Q241 -151 267 -121Q284 -97 288 -73T292 23V151V355H218L145 356Q123 365 123 387V393Q123 422 145 430H148Q151 430 156 430T169 430T185 430T205 431T227 431T251 431H354Q356 430 360 427T365 424T369 420T372 416T373 410T375 402T376 391T377 376T377 356Q377 345 377 286T376 176Q376 -67 371 -88Q362 -123 342 -151T299 -194Q254 -228 180 -228Q84 -226 56 -177Q49 -162 48 -148Q48 -122 61 -107"],107:[611,0,525,13,507,"13 42Q13 63 23 69T69 76H102V535H69H54Q34 535 24 542T13 573Q13 588 15 593Q22 605 29 608T56 611H95Q113 611 122 611T140 610T152 609T159 607T163 603T167 597T173 589V413L174 237L295 355H275Q260 355 253 356T239 367T232 393Q232 419 243 425T304 431H359H464Q479 422 482 415T485 393Q485 364 464 356L431 355H398L293 254L427 76H486Q501 67 504 60T507 38Q507 28 507 24T501 12T486 1H314Q292 8 292 38Q292 62 308 73Q312 75 326 76L338 77L290 140Q279 154 267 171T248 196L242 204L207 171L173 139V76H206H221Q241 76 251 69T262 38Q262 11 244 3Q240 1 138 1Q123 1 100 1T70 0Q32 0 23 7T13 42"],108:[612,-1,525,51,474,"51 573Q51 602 73 610H76Q79 610 84 610T97 610T113 610T133 611T155 611T179 611H282Q301 598 304 586V76H452Q466 67 470 60T474 38Q474 10 452 1H73Q51 9 51 32V38Q51 54 54 60T73 76H220V535H146L73 536Q51 545 51 567V573"],109:[437,-1,525,-12,536,"133 76Q156 74 164 67T172 38Q172 9 151 1H11Q-12 8 -12 38Q-12 61 5 73Q10 75 28 76H45V355H28Q10 356 5 358Q-12 370 -12 393Q-12 419 11 431H52H70Q91 431 100 427T116 405Q163 436 200 436Q255 436 281 390L285 394Q289 398 292 400T301 407T314 415T329 423T346 429T366 434T389 436H392Q425 436 448 411Q469 390 474 360T480 268V232V203V76H497Q520 74 528 67T536 38Q536 9 515 1H396Q374 9 374 32V38Q374 73 402 76H409V191V242Q409 317 404 339T375 361Q343 361 323 332T299 264Q298 258 298 165V76H315Q338 74 346 67T354 38Q354 9 333 1H214Q192 9 192 32V38Q192 73 220 76H227V191V242Q227 317 222 339T193 361Q161 361 141 332T117 264Q116 258 116 165V76H133"],110:[436,-1,525,4,520,"89 431Q94 431 105 431T122 432Q173 432 173 399Q173 394 175 394Q176 394 190 404T233 425T298 436Q343 436 371 423Q411 402 423 365T436 265Q436 257 436 239T435 211V198V76H498Q512 67 516 60T520 38Q520 9 498 1H308Q286 9 286 32V38V45Q286 65 303 73Q309 76 329 76H351V188Q351 204 351 230T352 266Q352 321 341 341T288 361Q253 361 222 341T176 274L174 264L173 170V76H236Q250 67 254 60T258 38Q258 9 236 1H27Q4 8 4 38Q4 53 8 60T27 76H89V355H58L27 356Q4 363 4 393Q4 408 8 415T27 431H89"],111:[440,6,525,52,472,"52 216Q52 318 118 379T261 440Q343 440 407 378T472 216Q472 121 410 58T262 -6Q176 -6 114 58T52 216ZM388 225Q388 281 351 322T261 364Q213 364 175 325T136 225Q136 158 174 114T262 70T350 114T388 225"],112:[437,221,525,4,492,"89 431Q93 431 104 431T121 432Q173 432 173 401V396L182 402Q237 437 305 437Q376 437 434 378T492 217Q492 146 459 93T382 17T291 -6Q261 -6 232 5T188 26L174 37Q173 37 173 -54V-146H236Q250 -155 254 -162T258 -184Q258 -213 236 -221H27Q4 -214 4 -184Q4 -169 8 -162T27 -146H89V355H58L27 356Q4 363 4 393Q4 408 8 415T27 431H89ZM409 
215Q409 269 377 315T283 361Q255 361 224 344T177 297L173 290V167Q189 124 213 97T278 70Q330 70 369 111T409 215"],113:[437,222,525,34,545,"34 215Q34 309 91 368T222 436Q224 436 231 436T242 437Q309 437 372 390V401Q372 419 381 428T414 437Q426 437 432 436T444 430T456 412V-146H489H504Q524 -146 534 -153T545 -184Q545 -211 527 -219Q523 -221 414 -221Q398 -221 374 -221T342 -222Q304 -222 294 -216T283 -184Q283 -157 301 -149Q307 -146 339 -146H372V-51Q372 43 371 43L364 38Q357 33 345 26T318 12T280 -1T236 -6Q155 -6 95 55T34 215ZM117 215Q117 152 157 111T250 70Q289 70 318 92T363 146Q372 163 372 192V215L371 263Q339 360 254 360Q206 360 162 321T117 215"],114:[437,-1,525,24,487,"327 76Q359 76 369 70T380 38Q380 10 359 1H47Q24 8 24 38Q24 54 28 61T47 76H145V355H96L47 356Q24 363 24 393Q24 409 28 416T47 431H207Q223 419 226 414T229 393V387V369Q297 437 394 437Q436 437 461 417T487 368Q487 347 473 332T438 317Q428 317 420 320T407 327T398 337T393 347T390 356L388 361Q348 356 324 345Q228 299 228 170Q228 161 228 151T229 138V76H293H327"],115:[440,6,525,71,458,"72 317Q72 361 108 396T229 439Q231 439 245 439T268 440Q303 439 324 435T353 427T363 423L372 432Q380 440 397 440Q430 440 430 395Q430 390 430 380T429 366V335Q429 311 422 302T387 293Q364 293 355 300T346 316T343 336T325 353Q306 364 257 364Q209 364 178 351T147 317Q147 284 231 272Q327 256 357 247Q458 210 458 129V121Q458 74 413 34T271 -6Q246 -6 224 -3T189 5T165 14T150 22T144 26Q142 23 139 18T135 11T132 6T128 1T124 -2T119 -4T113 -5T104 -6Q84 -6 78 6T71 43Q71 48 71 60T72 79Q72 132 73 141T81 157Q90 166 115 166Q135 166 142 162T157 140Q168 108 191 90T260 70Q297 70 323 76T361 91T379 110T384 129Q384 157 346 171T247 195T165 212Q119 228 96 256T72 317"],116:[554,6,525,25,448,"25 395Q26 405 26 408T29 416T35 423T48 431H145V481L146 532Q154 547 161 550T184 554H189Q218 554 227 534Q229 529 229 480V431H405Q406 430 411 427T418 422T422 416T426 407T427 393Q427 387 427 382T424 374T421 368T417 363T413 360T408 358L405 356L317 355H229V249Q229 237 229 214T228 179Q228 126 241 98T295 70Q354 70 365 149Q366 167 375 174Q383 182 407 182H415Q438 182 446 166Q448 161 448 148Q448 84 398 39T282 -6Q226 -6 189 29T146 128Q145 134 145 247V355H96H72Q45 355 35 362T25 395"],117:[431,5,525,4,520,"4 393Q4 416 15 423T52 431H90Q141 431 151 429T168 417Q171 412 173 409V254L174 100Q182 70 244 70Q320 70 344 119Q349 130 350 144T351 248V355H320L289 356Q266 363 266 393Q266 408 270 415T289 431H351H386Q409 431 418 428T433 411Q435 406 435 241V76H498Q512 67 516 60T520 38Q520 9 498 1H436H394Q372 1 364 5T351 26L342 21Q293 -5 227 -5Q118 -5 96 67Q91 82 90 101T89 227V355H58L27 356Q4 363 4 393"],118:[432,4,525,24,500,"24 392Q24 417 36 424T79 432Q85 432 103 432T132 431H215Q229 422 233 415T237 393Q237 355 198 355H193H172L262 77L352 355H331H323Q288 355 288 393Q288 409 291 415T310 431H478Q491 423 495 416T500 393Q500 364 478 356L452 355H426L374 190Q320 24 318 20Q307 -4 273 -4H262H251Q217 -4 206 20Q204 24 150 190L98 355H72L47 356Q24 363 24 392"],119:[431,4,525,16,508,"54 355Q16 355 16 388V393Q16 423 37 430Q41 431 125 431H162Q206 431 218 425T230 393Q230 366 212 358Q206 355 174 355Q141 355 141 354L150 296Q181 110 181 89V84Q182 85 183 96Q185 118 199 173T218 237Q223 247 245 259H264H268Q294 259 309 240Q315 229 329 174T343 92Q343 84 344 84V86Q344 88 344 91T345 97Q347 125 356 187T374 301T383 354Q383 355 350 355H333Q314 355 304 362T294 393Q294 420 312 428Q318 431 401 431H440Q485 431 496 425T508 393Q508 382 508 377T498 363T470 355L455 354Q455 353 441 271T413 104T396 16Q384 -4 355 -4H351Q315 -4 305 9T280 79Q278 90 276 96Q265 149 265 169Q265 176 264 169Q263 166 
263 162Q261 130 248 79T230 18Q220 -4 183 -4H175L151 -3Q134 5 127 17L112 102Q97 188 83 270T69 354Q62 355 54 355"],120:[432,-1,525,29,495,"35 393Q35 417 46 424T89 432Q95 432 112 432T141 431H223Q238 422 241 415T244 393Q244 389 244 383T237 367T216 355Q209 355 209 354L234 319Q259 286 260 286L308 354Q308 355 301 355Q285 356 278 365T270 384L271 393Q271 420 289 428Q295 431 376 431H459Q460 430 465 427T472 422T476 416T480 407T481 393Q481 368 470 362T434 355H425H392L344 290Q295 225 295 223Q294 223 309 203T350 149L405 77L439 76H453Q474 76 484 69T495 38Q495 10 473 1H303Q281 9 281 32V38Q281 49 282 54T290 67T313 76Q324 76 324 77L259 173L197 77Q202 76 209 76Q225 75 233 68T241 55T242 38Q242 28 242 24T236 12T221 1H51Q29 9 29 32V38Q29 48 29 51T31 59T38 67T51 76H117L171 149Q224 222 224 223L124 355H90H78Q54 355 45 361T35 393"],121:[431,228,525,26,501,"26 393Q26 417 37 424T80 431H134H217Q232 422 235 416T239 393Q239 379 236 371T226 360T214 356T197 355L179 354V353L188 330Q197 306 209 272T235 201T259 133T271 89V84L274 95Q279 122 298 185T335 300T352 354Q352 355 331 355Q312 355 304 358Q288 368 288 393Q288 408 291 415T310 431H478Q479 430 484 427T491 422T495 416T499 407T500 393Q500 376 493 367T479 357T458 355H452Q426 355 425 353Q420 337 351 124T280 -94Q240 -195 168 -220Q147 -228 125 -228Q89 -228 66 -201T42 -139Q42 -116 56 -102T93 -87Q117 -87 130 -102T144 -135V-138H126Q121 -148 121 -150T130 -152Q182 -147 207 -87Q211 -78 223 -40T236 1Q230 10 102 355H75L49 356Q26 363 26 393"],122:[432,-1,525,34,475,"56 1Q40 7 37 14T34 41Q34 59 36 64Q39 67 43 73Q65 95 191 213T341 355H133V334Q133 306 124 297Q116 289 91 289H83Q60 289 51 308Q49 313 49 361L50 409Q59 427 72 430H78Q83 430 92 430T115 430T144 430T179 431T219 431T262 431H450Q452 430 455 428T459 424T463 422T466 419T468 416T469 413T470 409T471 404T472 398T472 391Q472 374 469 368L462 358Q453 349 315 218Q210 122 164 76H391V103Q391 136 400 146Q409 155 433 155Q464 155 473 135Q475 130 475 78V46Q475 24 472 16T453 1H56"],123:[694,83,525,50,475,"430 -7H436Q449 -7 456 -8T469 -19T475 -45Q475 -69 466 -76T434 -83H419Q386 -82 363 -80T308 -69T253 -41T223 7L221 17L220 118V220L218 224Q215 229 214 230T210 235T204 241T195 246T184 252T170 257T151 262T127 265Q118 267 100 267T69 270T52 283Q50 288 50 306V314Q50 335 67 341Q68 342 102 343T172 355T217 386L220 392V493L221 595Q225 611 230 621T251 650T304 679T395 693L406 694Q418 694 426 694Q458 694 466 685Q475 676 475 656T466 627Q458 618 430 618Q319 618 305 587L304 486Q304 476 304 458T305 431Q305 385 295 358T251 311L243 306Q243 305 254 298T281 274T302 231Q304 223 304 125L305 25Q309 16 316 10T352 -1T430 -7"],124:[694,82,525,228,297,"228 668Q241 694 262 694Q268 694 273 693T282 688T287 682T293 674L297 668V-57Q282 -82 262 -82Q239 -82 228 -57V668"],125:[694,83,525,49,475,"49 655Q49 674 56 682T73 692T106 694Q141 693 167 690T224 677T275 647T303 595L305 392Q313 367 347 356T417 344T457 341Q475 335 475 306Q475 292 473 285T464 273T451 269T430 267Q352 262 327 246Q311 236 305 220L303 17L301 7Q294 -16 277 -33T242 -60T196 -74T150 -80T106 -83Q78 -83 72 -82T58 -74Q49 -65 49 -44Q49 -24 58 -16Q66 -7 94 -7Q143 -7 171 -1T207 10T220 25V125Q220 223 222 231Q228 257 243 274T270 299L281 306Q234 329 222 381Q220 387 220 486V587Q212 597 207 601T173 612T94 618Q66 618 58 627Q49 635 49 655"],126:[611,-466,525,87,437,"125 467Q113 467 100 480T87 509Q88 520 111 543Q172 602 209 609Q219 611 224 611Q246 611 263 596T290 566T304 551Q319 551 367 594Q383 610 396 610H400Q411 610 424 597T437 568Q436 557 413 534Q348 469 305 466Q278 466 260 481T234 511T220 526Q205 526 157 483Q141 467 129 
467H125"],127:[612,-519,525,104,421,"104 565Q104 590 120 600T155 611Q175 611 180 610Q217 599 217 565Q217 545 202 532T166 519H159H155Q120 519 107 547Q104 553 104 565ZM307 565Q307 580 317 593T346 610Q348 610 350 610T354 611Q355 612 367 612Q395 611 408 597T421 565T409 534T365 519H358Q336 519 322 532T307 565"]});MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Typewriter/Regular/BasicLatin.js");
PypiClean
/CoDocParser-0.2.49.tar.gz/CoDocParser-0.2.49/docparser/plugins/data_type_behavior.py
import re
import time
import traceback

from docparser.core.behavior_base import BehaviorBase


class DataTypeBehavior(BehaviorBase):
    """
    Data type conversion behavior
    =============================
    Configuration (under the root node):
        "data_type_format": {
            # table: the column name matching a column in the table template
            # data_type: the conversion method to run
            # format: the format pattern to apply
            # collect: for complex cases, a regex whose named capture groups must
            #          match the group names used in `format`; e.g. format '{a}+{b}'
            #          with regex '(?P<a>\\w*?,)afasdf(?P<b>\\d+)' turns
            #          'hello,afasdf123123' => 'hello+123123'
            # filter: a regex used to strip noise characters from the current value
            # default: if a default is set, it replaces the original value when
            #          conversion fails
            "ESTIMATE ARRIVAL AT POD Country": {"table": "", "data_type": "time",
                                                "format": "%A, %d %b, %Y %I:%M %p", "filter": "(\\n)"},
            "ESTIMATE ARRIVAL AT POD Time": {"table": "", "data_type": "time",
                                             "format": "%A, %d %b, %Y %I:%M %p", "filter": "(\\n)"},
        }
    """
    class_index = 1

    class TypeConverter:

        @classmethod
        def convert_str(cls, table, error, key, conf, values, collect, val_format, val_filter, default_val, logger):
            return values

        @classmethod
        def convert_time(cls, table, error, key, conf, values, collect, val_format, val_filter, default_val, logger):
            format_list = val_format if isinstance(val_format, list) else [val_format]
            for i in range(len(values)):
                for f in format_list:
                    try:
                        values[i] = time.strftime("%Y-%m-%d %H:%M:%S", time.strptime(values[i], f))
                        break
                    except ValueError:
                        if default_val:
                            values[i] = default_val
                        logger.getLogger(cls.__name__).error(
                            f'Time format conversion failed: [{values[i]} | {f} | {default_val}] <{traceback.format_exc()}>')
                    except Exception:
                        if default_val:
                            values[i] = default_val
                        logger.getLogger(cls.__name__).error(
                            f'Time format conversion failed: [{values[i]} | {f} | {default_val}] <{traceback.format_exc()}>')
            return values

    def data_processing(self, ref_data, data: list, error: list, config: dict, logger, additional) -> dict:
        """Data processing behavior.

        :param ref_data: source data
        :param data: data parsed from the excel sheet
        :param error: errors raised while parsing the excel sheet
        :param config: parser configuration
        :param logger: logging utility
        :param additional: additional data
        """
        format_config = config.get("data_type_format")
        if data and len(data) > 0 and format_config is not None:
            cls = DataTypeBehavior.TypeConverter()
            for key, conf in format_config.items():
                table_name = conf.get("table")
                data_type = conf.get("data_type")
                fun_name = f"convert_{data_type.lower()}"
                if hasattr(DataTypeBehavior.TypeConverter, fun_name):
                    callback = getattr(cls, fun_name)
                else:
                    continue
                val_format = conf.get("format")
                collect = conf.get("collect")
                val_filter = conf.get("filter")
                default_val = conf.get("default")
                values = self._find_values(key, table_name, data)
                if val_filter:  # `filter` is optional in the config; skip when absent
                    self.__filter(values, val_filter)
                self.__format(values, collect, val_format)
                callback(data, error, key, conf, values, collect, val_format, val_filter, default_val, logger)
                self._restore_values(key, table_name, data, values)
        return additional

    @classmethod
    def __filter(cls, values, rule):
        """Strip characters matching the given regex from every value."""
        regex = re.compile(rule)
        for i in range(len(values)):
            values[i] = regex.sub("", values[i])

    @classmethod
    def __format(cls, values, collect, val_format):
        if collect and collect.strip() != "":
            regex = re.compile(collect)
            for i in range(len(values)):
                if (match := regex.search(values[i])) is not None:
                    if (group_dict := match.groupdict()) is not None:
                        values[i] = val_format.format(**group_dict)
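# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hypothetical `data_type_format` configuration wired through
# `data_processing`. The key "ETA Time" and the default value are made up
# for this example; passing an empty `data` list keeps the call side-effect
# free while still exercising the configuration-parsing path.
if __name__ == "__main__":  # pragma: no cover
    import logging

    sample_config = {
        "data_type_format": {
            "ETA Time": {
                "table": "",
                "data_type": "time",
                "format": "%A, %d %b, %Y %I:%M %p",
                "filter": "(\\n)",
                "default": "1970-01-01 00:00:00",
            }
        }
    }
    behavior = DataTypeBehavior()
    # With real input, `data` holds the tables parsed by docparser and the
    # converted values are written back in place.
    print(behavior.data_processing(None, [], [], sample_config, logging, {}))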
PypiClean
/ApiMeter-1.2.8.tar.gz/ApiMeter-1.2.8/apimeter/locusts.py
import io
import multiprocessing
import os
import sys

from locust.main import main

from apimeter.logger import color_print
from apimeter.testcase import TestcaseLoader


def parse_locustfile(file_path):
    """ parse testcase file and return locustfile path.
    if file_path is a Python file, assume it is a locustfile
    if file_path is a YAML/JSON file, convert it to locustfile
    """
    if not os.path.isfile(file_path):
        color_print("file path invalid, exit.", "RED")
        sys.exit(1)

    file_suffix = os.path.splitext(file_path)[1]
    if file_suffix == ".py":
        locustfile_path = file_path
    elif file_suffix in ['.yaml', '.yml', '.json']:
        locustfile_path = gen_locustfile(file_path)
    else:
        # '' or other suffix
        color_print("file type should be YAML/JSON/Python, exit.", "RED")
        sys.exit(1)

    return locustfile_path


def gen_locustfile(testcase_file_path):
    """ generate locustfile from template.
    """
    locustfile_path = 'locustfile.py'
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "templates",
        "locustfile_template"
    )
    TestcaseLoader.load_test_dependencies()
    testset = TestcaseLoader.load_test_file(testcase_file_path)
    host = testset.get("config", {}).get("request", {}).get("base_url", "")

    with io.open(template_path, encoding='utf-8') as template:
        with io.open(locustfile_path, 'w', encoding='utf-8') as locustfile:
            template_content = template.read()
            template_content = template_content.replace("$HOST", host)
            template_content = template_content.replace("$TESTCASE_FILE", testcase_file_path)
            locustfile.write(template_content)

    return locustfile_path


def start_master(sys_argv):
    sys_argv.append("--master")
    sys.argv = sys_argv
    main()


def start_slave(sys_argv):
    if "--slave" not in sys_argv:
        sys_argv.extend(["--slave"])
    sys.argv = sys_argv
    main()


def run_locusts_with_processes(sys_argv, processes_count):
    processes = []
    manager = multiprocessing.Manager()

    for _ in range(processes_count):
        p_slave = multiprocessing.Process(target=start_slave, args=(sys_argv,))
        p_slave.daemon = True
        p_slave.start()
        processes.append(p_slave)

    try:
        if "--slave" in sys_argv:
            [process.join() for process in processes]
        else:
            start_master(sys_argv)
    except KeyboardInterrupt:
        manager.shutdown()
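# --- Usage sketch (illustrative; not part of the original module) ---
# Converts a hypothetical YAML testset ("demo.yml" is a placeholder path,
# not a file shipped with ApiMeter) into a locustfile, then runs one master
# plus two slave processes. The argv layout mirrors what a locust command
# line would normally look like.
if __name__ == "__main__":  # pragma: no cover
    locustfile = parse_locustfile("demo.yml")
    run_locusts_with_processes(["locust", "-f", locustfile], processes_count=2)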
PypiClean
/Miscoto-3.1.2.tar.gz/Miscoto-3.1.2/miscoto/query.py
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>

import os
import tempfile
import clyngor
from miscoto import utils

# NOTE: the helpers at the end of this module (get_unproducible, get_transported
# and the *exchanged_metabolites* functions) still call the legacy pyasp solver
# classes (Gringo4Clasp, Gringo4, Clasp), which the file as shipped references
# without importing. The guarded import below is an assumption about the
# intended dependency; the clyngor-based helpers work without it.
try:
    from pyasp.asp import Gringo4, Gringo4Clasp, Clasp
except ImportError:
    Gringo4 = Gringo4Clasp = Clasp = None


def get_scopes(instance_f, encoding):
    """Get metabolic scope of a microbiota

    Args:
        instance_f (str): ASP instance file
        encoding (str): ASP model encoding

    Returns:
        TermSet: ASP model
    """
    prg = [encoding, instance_f]
    options = ''
    best_model = None
    models = clyngor.solve(prg, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity:
        best_model = model
    return best_model


def get_grounded_communities_from_file(instance_f, encoding):
    """Ground the model, from a file

    Args:
        instance_f (str): model file
        encoding (str): ASP model encoding

    Returns:
        bytes: grounded model
    """
    prg = [encoding, instance_f]
    grounding = clyngor.grounded_program(prg)
    return grounding


def get_communities_from_g(grounding):
    """Get optimal community, from grounding

    Args:
        grounding (bytes): grounded model

    Returns:
        TermSet: solution
    """
    options = '--configuration jumpy --opt-strategy=usc,oll'
    best_model = None
    models = clyngor.solve_from_grounded(grounding, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_communities(lp_instance, encoding):
    """Get optimal community, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        encoding (str): ASP model encoding

    Returns:
        TermSet: solution
    """
    options = '--configuration jumpy --opt-strategy=usc,5'
    prg = [encoding, lp_instance]
    best_model = None
    models = clyngor.solve(prg, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_intersection_communities_from_g(grounding, optimum):
    """Get intersection of solutions, from grounding

    Args:
        grounding (bytes): grounded model
        optimum (str): optimal score

    Returns:
        TermSet: intersection
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode cautious --opt-mode=optN,' + str(optimum)
    best_model = None
    models = clyngor.solve_from_grounded(grounding, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_intersection_communities_from_g_noopti(grounding):
    """Get intersection of solutions, from grounding, without optimal score

    Args:
        grounding (bytes): grounded model

    Returns:
        TermSet: intersection
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode cautious --opt-mode=optN'
    best_model = None
    models = clyngor.solve_from_grounded(grounding, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_intersection_communities_opti(lp_instance, optimum, encoding):
    """Get intersection of solutions, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        optimum (str): optimal score
        encoding (str): ASP model encoding

    Returns:
        TermSet: intersection
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode cautious --opt-mode=optN,' + str(optimum)
    prg = [encoding, lp_instance]
    best_model = None
    models = clyngor.solve(prg, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_intersection_communities(lp_instance, encoding):
    """Get intersection of solutions, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        encoding (str): ASP model encoding

    Returns:
        TermSet: intersection
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode cautious --opt-mode=optN'
    prg = [encoding, lp_instance]
    best_model = None
    models = clyngor.solve(prg, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_all_communities_from_g(grounding, optimum, nmodels=0):
    """Get all optimal communities, from grounding

    Args:
        grounding (bytes): grounded model
        optimum (str): optimal score
        nmodels (int, optional): Defaults to 0. number of models to compute, 0 = all

    Returns:
        list: list of TermSets
    """
    options = '--configuration handy --opt-strategy=usc,5 --opt-mode=optN,' + str(optimum)
    models = clyngor.solve_from_grounded(grounding, options=options, nb_model=nmodels,
                                         use_clingo_module=False).by_arity.discard_quotes
    opt_models = clyngor.opt_models_from_clyngor_answers(models)
    return opt_models


def get_all_communities_from_g_noopti(grounding, nmodels=0):
    """Get all optimal communities, from grounding, without optimal score

    Args:
        grounding (bytes): grounded model
        nmodels (int, optional): Defaults to 0. number of models, 0 = all

    Returns:
        list: list of TermSets
    """
    options = '--configuration handy --opt-strategy=usc,5 --opt-mode=optN'
    models = clyngor.solve_from_grounded(grounding, options=options, nb_model=nmodels,
                                         use_clingo_module=False).by_arity.discard_quotes
    opt_models = clyngor.opt_models_from_clyngor_answers(models)
    return opt_models


def get_all_communities_opti(lp_instance, optimum, encoding, nmodels=0):
    """Get all communities, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        optimum (str): optimal score
        encoding (str): ASP model encoding file
        nmodels (int, optional): Defaults to 0. number of models, 0 = all

    Returns:
        list: list of TermSets
    """
    options = '--configuration handy --opt-strategy=usc,0 --opt-mode=optN,' + str(optimum)
    prg = [encoding, lp_instance]
    models = clyngor.solve(prg, options=options, nb_model=nmodels,
                           use_clingo_module=False).by_arity.discard_quotes
    opt_models = clyngor.opt_models_from_clyngor_answers(models)
    return opt_models


def get_all_communities(lp_instance, encoding, nmodels=0):
    """Get all communities, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        encoding (str): ASP model encoding file
        nmodels (int, optional): Defaults to 0. number of models, 0 = all

    Returns:
        list: list of TermSets
    """
    options = '--configuration handy --opt-strategy=usc,0 --opt-mode=optN'
    prg = [encoding, lp_instance]
    opt_models = clyngor.opt_models_from_clyngor_answers(
        clyngor.solve(prg, options=options, nb_model=nmodels,
                      use_clingo_module=False).by_arity.discard_quotes)
    return opt_models


def get_union_communities_from_g(grounding, optimum):
    """Get union of all community solutions

    Args:
        grounding (bytes): grounded model
        optimum (str): optimal score

    Returns:
        TermSet: union
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode=brave --opt-mode=optN,' + str(optimum)
    models = clyngor.solve_from_grounded(grounding, options=options, use_clingo_module=False)
    best_model = None
    for model in models.by_arity.discard_quotes.with_optimization:
        best_model = model
    return best_model


def get_union_communities_from_g_noopti(grounding):
    """Get union of all community solutions, from grounding, without optimal score

    Args:
        grounding (bytes): grounded instance

    Returns:
        TermSet: union
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode brave --opt-mode=optN'
    models = clyngor.solve_from_grounded(grounding, options=options, use_clingo_module=False)
    best_model = None
    for model in models.by_arity.discard_quotes.with_optimization:
        best_model = model
    return best_model


def get_union_communities_optimum(lp_instance, optimum, encoding):
    """Get union of community solutions, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        optimum (str): optimal score
        encoding (str): ASP encoding model file

    Returns:
        TermSet: union
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode=brave --opt-mode=optN --opt-bound=' + str(optimum)
    prg = [encoding, lp_instance]
    best_model = None
    models = clyngor.solve(prg, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_union_communities(lp_instance, encoding):
    """Get union of community solutions, from TermSet

    Args:
        lp_instance (TermSet): microbiota model
        encoding (str): ASP encoding model file

    Returns:
        TermSet: union
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode=brave --opt-mode=optN'
    prg = [encoding, lp_instance]
    best_model = None
    models = clyngor.solve(prg, options=options, use_clingo_module=False)
    for model in models.discard_quotes.by_arity.with_optimization:
        best_model = model
    return best_model


def get_unproducible(draft, seeds, targets, encoding):
    """Get unproducible targets in a microbiota

    Args:
        draft (TermSet): metabolic model
        seeds (TermSet): seeds
        targets (TermSet): targets
        encoding (str): ASP model encoding

    Returns:
        TermSet: unproducible targets
    """
    draft_f = utils.to_file(draft)
    seed_f = utils.to_file(seeds)
    target_f = utils.to_file(targets)
    prg = [encoding, draft_f, seed_f, target_f]
    solver = Gringo4Clasp()
    models = solver.run(prg, collapseTerms=True, collapseAtoms=False)
    os.unlink(draft_f)
    os.unlink(seed_f)
    os.unlink(target_f)
    return models[0]


def get_transported(instance, encoding):
    """Get transported metabolites

    Args:
        instance (TermSet): microbiota model
        encoding (str): ASP model encoding

    Returns:
        TermSet: transported metabolites
    """
    instance_f = utils.to_file(instance)
    prg = [encoding, instance_f]
    solver = Gringo4Clasp()
    models = solver.run(prg, collapseTerms=True, collapseAtoms=False)
    os.unlink(instance_f)
    return models[0]


def get_grounded_instance_exchanged_metabolites(instance, encoding, exchanged_in_escope=False):
    """Get grounding under compartmentalized framework

    Args:
        instance (TermSet): microbiota model
        encoding (str): ASP model encoding
        exchanged_in_escope (bool, default=False): additional option for ASP

    Returns:
        bytes: grounded model
    """
    instance_f = utils.to_file(instance)
    if exchanged_in_escope:
        options = "--const exchanged_in_escope=1"
    else:
        options = "--const exchanged_in_escope=0"
    print(os.path.abspath(instance_f))
    prg = [encoding, instance_f]
    grounder = Gringo4(gringo_options=options)
    grounding = grounder.run(prg)
    os.unlink(instance_f)
    return grounding


def get_grounded_instance_exchanged_metabolites_from_file(instance_f, encoding, exchanged_in_escope=False):
    """Get grounding from file

    Args:
        instance_f (str): microbiota model file
        encoding (str): ASP model encoding
        exchanged_in_escope (bool, default=False): additional ASP option

    Returns:
        bytes: grounded model
    """
    if exchanged_in_escope:
        options = "--const exchanged_in_escope=1"
    else:
        options = "--const exchanged_in_escope=0"
    print(os.path.abspath(instance_f))
    prg = [encoding, instance_f]
    grounder = Gringo4(gringo_options=options)
    grounding = grounder.run(prg)
    return grounding


def get_exchanged_metabolites_onesol(grounding):
    """Select community in compartmentalized framework

    Args:
        grounding (bytes): grounded model

    Returns:
        TermSet: solution
    """
    options = "--configuration=jumpy --opt-strategy=usc,5"
    solver = Clasp(clasp_options=options)
    models = solver.run(grounding, collapseTerms=True, collapseAtoms=False)
    return models


def get_exchanged_metabolites_intersection(grounding, optimum):
    """Get intersection of communities solutions

    Args:
        grounding (bytes): grounded model
        optimum (str): optimal score

    Returns:
        TermSet: intersection
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode cautious --opt-mode=optN --opt-bound=' + str(optimum)
    solver = Clasp(clasp_options=options)
    intersec = solver.run(grounding, collapseTerms=True, collapseAtoms=False)
    return intersec[0]


def get_exchanged_metabolites_allsol(grounding, optimum, nmodels=0):
    """Get all communities

    Args:
        grounding (bytes): grounded model
        optimum (str): optimal score
        nmodels (int, optional): Defaults to 0. number of models, 0 = all

    Returns:
        Set: set of TermSets
    """
    options = str(nmodels) + ' --configuration jumpy --opt-strategy=usc,5 --opt-mode=optN --opt-bound=' + str(optimum)
    solver = Clasp(clasp_options=options)
    models = solver.run(grounding, collapseTerms=True, collapseAtoms=False)
    return models


def get_exchanged_metabolites_union(grounding, optimum):
    """Get union of community solutions

    Args:
        grounding (bytes): grounded model
        optimum (str): optimal score

    Returns:
        TermSet: union
    """
    options = '--configuration jumpy --opt-strategy=usc,5 --enum-mode brave --opt-mode=optN --opt-bound=' + str(optimum)
    solver = Clasp(clasp_options=options)
    union = solver.run(grounding, collapseTerms=True, collapseAtoms=False)
    return union[0]
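# --- Usage sketch (illustrative; not part of the original module) ---
# "instance.lp" and "encoding.lp" are placeholder paths; real callers pass the
# ASP instance produced by miscoto's instance-generation step together with one
# of the encodings shipped with the package. Grounding once and reusing it is
# the pattern the *_from_g helpers above are built for.
if __name__ == "__main__":  # pragma: no cover
    grounding = get_grounded_communities_from_file("instance.lp", "encoding.lp")
    one_model = get_communities_from_g(grounding)  # (answer set, optimization) pair
    print(one_model)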
PypiClean
/EulerPy-1.4.0.tar.gz/EulerPy-1.4.0/README.rst
**********************************
EulerPy |Travis| |PyPI| |Homebrew|
**********************************

EulerPy is a command line tool designed to streamline the process of solving
Project Euler problems using Python. The package focuses on two main tasks:
firstly, to create Python "template" files with a docstring containing the
text of a Project Euler problem for ease-of-reference, and secondly, to check
whether a problem has been solved correctly.


============
Installation
============

EulerPy can be installed (and updated) from PyPI using `pip`_:

.. code-block:: bash

    $ pip install --upgrade EulerPy

Conversely, it can be uninstalled using `pip`_ as well.

.. code-block:: bash

    $ pip uninstall EulerPy

Alternatively, OS X users can install EulerPy using `Homebrew`_:

.. code-block:: bash

    $ brew install euler-py


=====
Usage
=====

First, you'll want to ``cd`` to the directory where your Project Euler files
are being stored.

.. code-block:: bash

    $ mkdir ~/project-euler
    $ cd ~/project-euler

At this point, you'll probably want to run the ``euler`` command, which will
prompt to create ``001.py``, a file containing the text to Project Euler
problem #1 as its docstring.

.. code-block:: bash

    $ euler
    No Project Euler files found in the current directory.
    Generate file for problem 1? [Y/n]: Y
    Successfully created "001.py".

    $ cat 001.py
    """
    Project Euler Problem 1
    =======================

    If we list all the natural numbers below 10 that are multiples of 3 or 5,
    we get 3, 5, 6 and 9. The sum of these multiples is 23.

    Find the sum of all the multiples of 3 or 5 below 1000.
    """

At this point, you can open up your editor of choice and code up a solution
to the problem, making sure to ``print()`` the output. Once you feel that
you've solved the problem, run the ``euler`` command again to verify your
solution is correct. If the answer is correct, the solution will be printed
in green and the script will ask to generate the next problem file. If
incorrect, the solution will be printed in red instead. Additionally, the
time elapsed during the solution-checking process will also be printed.

.. code-block:: bash

    $ euler
    Checking "001.py" against solution: [no output] # (output in red)

    $ echo print 42 >> 001.py
    $ euler
    Checking "001.py" against solution: 42 # (output in green)
    Generate file for problem 2? [Y/n]: Y
    Successfully created "002.py".

EulerPy also has a few command line options that act as different commands
(use ``euler --help`` to see a summary of all the options).


``--cheat / -c``
----------------

The ``--cheat`` option will print the answer to a problem after prompting the
user to ensure that they want to see it. If no problem argument is given, it
will print the answer to the problem that they are currently working on.

.. code-block:: bash

    $ euler --cheat
    View answer to problem 2? [y/N]: Y
    The answer to problem 2 is <redacted>.

    $ euler --cheat 100
    View answer to problem 100? [y/N]: Y
    The answer to problem 100 is <redacted>.


``--generate / -g``
-------------------

The ``--generate`` option will create a Python file for the given problem
number. If no problem number is given, it will overwrite the most recent
problem with a file containing only the problem docstring (after prompting
the user).

.. code-block:: bash

    $ euler --generate
    Generate file for problem 2? [Y/n]: Y
    "002.py" already exists. Overwrite? [y/N]:
    Successfully created "002.py".

    $ euler --generate 5
    Generate file for problem 5? [Y/n]: n
    Aborted!

``euler <problem>`` is equivalent to ``euler --generate <problem>`` if the
file **does not** exist.

.. code-block:: bash

    $ cat 005.py
    cat: 005.py: No such file or directory

    $ euler 5
    Generate file for problem 5? [Y/n]: n
    Aborted!

The file generation process will also automatically copy any relevant
resource files to a ``resources`` subdirectory.

.. code-block:: bash

    $ euler 22
    Generate file for problem 22? [Y/n]: Y
    Successfully created "022.py".
    Copied "names.txt" to project-euler/resources.


``--preview / -p``
------------------

The ``--preview`` option will print the text of a given problem to the
terminal; if no problem number is given, it will print the next problem
instead.

.. code-block:: bash

    $ euler --preview
    Project Euler Problem 3

    The prime factors of 13195 are 5, 7, 13 and 29.

    What is the largest prime factor of the number 600851475143?

    $ euler --preview 5
    Project Euler Problem 5

    2520 is the smallest number that can be divided by each of the numbers
    from 1 to 10 without any remainder.

    What is the smallest number that is evenly divisible by all of the
    numbers from 1 to 20?


``--skip / -s``
---------------

The ``--skip`` option will prompt the user to "skip" to the next problem. As
of EulerPy v1.1, it will also append a "skipped" suffix to the skipped
problem file.

.. code-block:: bash

    $ euler --skip
    Current problem is problem 2.
    Generate file for problem 3? [y/N]: Y
    Successfully created "003.py".
    Renamed "002.py" to "002-skipped.py".


``--verify / -v``
-----------------

The ``--verify`` option will check whether a given problem file outputs the
correct solution to the problem. If no problem number is given, it will check
the current problem.

.. code-block:: bash

    $ euler --verify
    Checking "003.py" against solution: [no output] # (output in red)

    $ euler --verify 1
    Checking "001.py" against solution: <redacted> # (output in green)

As of EulerPy v1.1, verifying a skipped problem file will remove the
"skipped" suffix from its filename.

.. code-block:: bash

    $ euler --verify 2
    Checking "002-skipped.py" against solution: <redacted>
    Renamed "002-skipped.py" to "002.py".

``euler <problem>`` is equivalent to ``euler --verify <problem>`` if the file
**does** exist.

.. code-block:: bash

    $ cat 001.py
    """
    Project Euler Problem 1
    =======================

    If we list all the natural numbers below 10 that are multiples of 3 or 5,
    we get 3, 5, 6 and 9. The sum of these multiples is 23.

    Find the sum of all the multiples of 3 or 5 below 1000.
    """

    $ euler 1
    Checking "001.py" against solution: <redacted>


``--verify-all``
----------------

The ``--verify-all`` option was added in EulerPy v1.1. It essentially runs
``--verify`` on all the problem files it can find in the current directory,
but also prints an overview of all of the problems in the directory. Note
that if the verification encounters a ``KeyboardInterrupt`` exception, it
will skip the verification of that specific file. This allows for the
ability to skip verifying some files but not others, in the case that some
solutions are taking too long to compute.

.. code-block:: bash

    $ euler --verify-all
    Checking "001.py" against solution: <redacted>
    Checking "002.py" against solution: ^C
    Checking "003.py" against solution: [no output]
    ---------------------------------------------------------------
    C = correct, I = incorrect, E = error, S = skipped, . = missing
    Problems 001-020:   C S I . . . . . . . . . . . . . . . . .

This option should be run after upgrading to v1.1 from EulerPy v1.0, as it
will automatically rename any problems that have been skipped using
``--skip``, making them easy to distinguish from those that have been
correctly solved.


File Prefixes
-------------

As of v1.3.0, EulerPy will attempt to keep the prefix of problem files
consistent. The motivation behind this is that ``import 001`` results in a
syntax error whereas ``import euler001`` is valid. By using the latter naming
scheme, it is possible to reuse code written in previous files.

.. code-block:: bash

    $ mv 003.py euler003.py
    $ euler --skip
    Current problem is problem 3.
    Generate file for problem 4? [y/N]: Y
    Successfully created "euler004.py".
    Renamed "euler003.py" to "euler003-skipped.py".


============
Contributing
============

See `CONTRIBUTING.rst`_.


=============
Miscellaneous
=============

The text for the problems in `problems.txt`_ was derived from Kyle Keen's
`Local Euler`_ project, and the solutions in `solutions.txt`_ were derived
from Bai Li's `projecteuler-solutions`_ repository.

See `this blog post`_ for insight into the development process.

EulerPy uses `Click`_ as a dependency for its CLI functionality.


=======
License
=======

EulerPy is licensed under the `MIT License`_.


.. |Travis| image:: https://img.shields.io/travis/iKevinY/EulerPy/master.svg
    :alt: Build Status
    :target: http://travis-ci.org/iKevinY/EulerPy

.. |PyPI| image:: https://img.shields.io/pypi/v/EulerPy.svg
    :alt: PyPI Version
    :target: https://pypi.python.org/pypi/EulerPy/

.. |Homebrew| image:: https://img.shields.io/homebrew/v/euler-py.svg
    :alt: Homebrew Version
    :target: https://github.com/Homebrew/homebrew-core/blob/master/Formula/euler-py.rb

.. _pip: http://www.pip-installer.org/en/latest/index.html
.. _Homebrew: http://brew.sh
.. _CONTRIBUTING.rst: https://github.com/iKevinY/EulerPy/blob/master/CONTRIBUTING.rst
.. _Local Euler: http://kmkeen.com/local-euler/
.. _problems.txt: https://github.com/iKevinY/EulerPy/blob/master/EulerPy/data/problems.txt
.. _solutions.txt: https://github.com/iKevinY/EulerPy/blob/master/EulerPy/data/solutions.txt
.. _projecteuler-solutions: https://github.com/luckytoilet/projecteuler-solutions
.. _this blog post: http://kevinyap.ca/2014/06/eulerpy-streamlining-project-euler/
.. _click: https://github.com/mitsuhiko/click
.. _MIT License: https://github.com/iKevinY/EulerPy/blob/master/LICENSE
PypiClean
/Appium-Python-Client-2.11.1.tar.gz/Appium-Python-Client-2.11.1/docs/_build/html/_static/language_data.js
var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"]; /* Non-minified version is copied as a separate JS file, is available */ /** * Porter Stemmer */ var Stemmer = function() { var step2list = { ational: 'ate', tional: 'tion', enci: 'ence', anci: 'ance', izer: 'ize', bli: 'ble', alli: 'al', entli: 'ent', eli: 'e', ousli: 'ous', ization: 'ize', ation: 'ate', ator: 'ate', alism: 'al', iveness: 'ive', fulness: 'ful', ousness: 'ous', aliti: 'al', iviti: 'ive', biliti: 'ble', logi: 'log' }; var step3list = { icate: 'ic', ative: '', alize: 'al', iciti: 'ic', ical: 'ic', ful: '', ness: '' }; var c = "[^aeiou]"; // consonant var v = "[aeiouy]"; // vowel var C = c + "[^aeiouy]*"; // consonant sequence var V = v + "[aeiou]*"; // vowel sequence var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 var s_v = "^(" + C + ")?" + v; // vowel in stem this.stemWord = function (w) { var stem; var suffix; var firstch; var origword = w; if (w.length < 3) return w; var re; var re2; var re3; var re4; firstch = w.substr(0,1); if (firstch == "y") w = firstch.toUpperCase() + w.substr(1); // Step 1a re = /^(.+?)(ss|i)es$/; re2 = /^(.+?)([^s])s$/; if (re.test(w)) w = w.replace(re,"$1$2"); else if (re2.test(w)) w = w.replace(re2,"$1$2"); // Step 1b re = /^(.+?)eed$/; re2 = /^(.+?)(ed|ing)$/; if (re.test(w)) { var fp = re.exec(w); re = new RegExp(mgr0); if (re.test(fp[1])) { re = /.$/; w = w.replace(re,""); } } else if (re2.test(w)) { var fp = re2.exec(w); stem = fp[1]; re2 = new RegExp(s_v); if (re2.test(stem)) { w = stem; re2 = /(at|bl|iz)$/; re3 = new RegExp("([^aeiouylsz])\\1$"); re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); if (re2.test(w)) w = w + "e"; else if (re3.test(w)) { re = /.$/; w = w.replace(re,""); } else if (re4.test(w)) w = w + "e"; } } // Step 1c re = /^(.+?)y$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = new RegExp(s_v); if (re.test(stem)) w = stem + "i"; } // Step 2 re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; suffix = fp[2]; re = new RegExp(mgr0); if (re.test(stem)) w = stem + step2list[suffix]; } // Step 3 re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; suffix = fp[2]; re = new RegExp(mgr0); if (re.test(stem)) w = stem + step3list[suffix]; } // Step 4 re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; re2 = /^(.+?)(s|t)(ion)$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = new RegExp(mgr1); if (re.test(stem)) w = stem; } else if (re2.test(w)) { var fp = re2.exec(w); stem = fp[1] + fp[2]; re2 = new RegExp(mgr1); if (re2.test(stem)) w = stem; } // Step 5 re = /^(.+?)e$/; if (re.test(w)) { var fp = re.exec(w); stem = fp[1]; re = new RegExp(mgr1); re2 = new RegExp(meq1); re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) w = stem; } re = /ll$/; re2 = new RegExp(mgr1); if (re.test(w) && re2.test(w)) { re = /.$/; w = w.replace(re,""); } // and turn initial Y back to y if (firstch == "y") w = firstch.toLowerCase() + w.substr(1); return w; } } var splitChars = (function() { 
var result = {}; var singles = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648, 1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702, 2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971, 2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345, 3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761, 3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823, 4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125, 8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695, 11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587, 43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141]; var i, j, start, end; for (i = 0; i < singles.length; i++) { result[singles[i]] = true; } var ranges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709], [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161], [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568], [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807], [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047], [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383], [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450], [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547], [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673], [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820], [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946], [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023], [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173], [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332], [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481], [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718], [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791], [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095], [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205], [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687], [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968], [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869], [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102], [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271], [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592], [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822], [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167], [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959], [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143], [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318], [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483], [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101], [10132, 11263], 
[11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567], [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292], [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444], [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783], [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311], [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511], [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774], [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071], [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263], [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519], [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647], [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967], [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295], [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274], [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007], [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381], [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]]; for (i = 0; i < ranges.length; i++) { start = ranges[i][0]; end = ranges[i][1]; for (j = start; j <= end; j++) { result[j] = true; } } return result; })(); function splitQuery(query) { var result = []; var start = -1; for (var i = 0; i < query.length; i++) { if (splitChars[query.charCodeAt(i)]) { if (start !== -1) { result.push(query.slice(start, i)); start = -1; } } else if (start === -1) { start = i; } } if (start !== -1) { result.push(query.slice(start)); } return result; }
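/* --- Usage sketch (added for illustration; not part of the original Sphinx
   asset). Shows how the helpers above fit together: `splitQuery` tokenizes a
   search string on the `splitChars` table and `Stemmer` applies Porter
   stemming to each token. Nothing here is invoked automatically. */
function exampleSearchNormalization(query) {
  var stemmer = new Stemmer();
  // Tokenize, lowercase, then stem each word.
  return splitQuery(query).map(function (word) {
    return stemmer.stemWord(word.toLowerCase());
  });
}
// exampleSearchNormalization("Relational databases") -> ["relat", "databas"]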
PypiClean
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/app-localize-behavior/.github/ISSUE_TEMPLATE.md
<!-- Instructions: https://github.com/PolymerElements/app-localize-behavior/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->

### Expected outcome

<!-- Example: The page stays the same color. -->

### Actual outcome

<!-- Example: The page turns pink. -->

### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->

### Steps to reproduce

<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->

### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
PypiClean
/CouchDB-1.2.tar.gz/CouchDB-1.2/couchdb/mapping.py
import copy

from calendar import timegm
from datetime import date, datetime, time
from decimal import Decimal
from time import strptime, struct_time

from couchdb.design import ViewDefinition
from couchdb import util

__all__ = ['Mapping', 'Document', 'Field', 'TextField', 'FloatField',
           'IntegerField', 'LongField', 'BooleanField', 'DecimalField',
           'DateField', 'DateTimeField', 'TimeField', 'DictField',
           'ListField', 'ViewField']
__docformat__ = 'restructuredtext en'

DEFAULT = object()


class Field(object):
    """Basic unit for mapping a piece of data between Python and JSON.

    Instances of this class can be added to subclasses of `Document` to
    describe the mapping of a document.
    """

    def __init__(self, name=None, default=None):
        self.name = name
        self.default = default

    def __get__(self, instance, owner):
        if instance is None:
            return self
        value = instance._data.get(self.name)
        if value is not None:
            value = self._to_python(value)
        elif self.default is not None:
            default = self.default
            if callable(default):
                default = default()
            value = default
        return value

    def __set__(self, instance, value):
        if value is not None:
            value = self._to_json(value)
        instance._data[self.name] = value

    def _to_python(self, value):
        return util.utype(value)

    def _to_json(self, value):
        return self._to_python(value)


class MappingMeta(type):

    def __new__(cls, name, bases, d):
        fields = {}
        for base in bases:
            if hasattr(base, '_fields'):
                fields.update(base._fields)
        for attrname, attrval in d.items():
            if isinstance(attrval, Field):
                if not attrval.name:
                    attrval.name = attrname
                fields[attrname] = attrval
        d['_fields'] = fields
        return type.__new__(cls, name, bases, d)


MappingMetaClass = MappingMeta('MappingMetaClass', (object,), {})


class Mapping(MappingMetaClass):

    def __init__(self, **values):
        self._data = {}
        for attrname, field in self._fields.items():
            if attrname in values:
                setattr(self, attrname, values.pop(attrname))
            else:
                setattr(self, attrname, getattr(self, attrname))

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data or ())

    def __delitem__(self, name):
        del self._data[name]

    def __getitem__(self, name):
        return self._data[name]

    def __setitem__(self, name, value):
        self._data[name] = value

    def get(self, name, default=None):
        return self._data.get(name, default)

    def setdefault(self, name, default):
        return self._data.setdefault(name, default)

    def unwrap(self):
        return self._data

    @classmethod
    def build(cls, **d):
        fields = {}
        for attrname, attrval in d.items():
            if not attrval.name:
                attrval.name = attrname
            fields[attrname] = attrval
        d['_fields'] = fields
        return type('AnonymousStruct', (cls,), d)

    @classmethod
    def wrap(cls, data):
        instance = cls()
        instance._data = data
        return instance

    def _to_python(self, value):
        return self.wrap(value)

    def _to_json(self, value):
        return self.unwrap()


class ViewField(object):
    r"""Descriptor that can be used to bind a view definition to a property of
    a `Document` class.

    >>> class Person(Document):
    ...     name = TextField()
    ...     age = IntegerField()
    ...     by_name = ViewField('people', '''\
    ...         function(doc) {
    ...             emit(doc.name, doc);
    ...         }''')
    >>> Person.by_name
    <ViewDefinition '_design/people/_view/by_name'>

    >>> print(Person.by_name.map_fun)
    function(doc) {
        emit(doc.name, doc);
    }

    That property can be used as a function, which will execute the view.

    >>> from couchdb import Database
    >>> db = Database('python-tests')
    >>> Person.by_name(db, count=3)
    <ViewResults <PermanentView '_design/people/_view/by_name'> {'count': 3}>

    The results produced by the view are automatically wrapped in the
    `Document` subclass the descriptor is bound to. In this example, it would
    return instances of the `Person` class. But please note that this requires
    the values of the view results to be dictionaries that can be mapped to
    the mapping defined by the containing `Document` class. Alternatively, the
    ``include_docs`` query option can be used to inline the actual documents
    in the view results, which will then be used instead of the values.

    If you use Python view functions, this class can also be used as a
    decorator:

    >>> class Person(Document):
    ...     name = TextField()
    ...     age = IntegerField()
    ...
    ...     @ViewField.define('people')
    ...     def by_name(doc):
    ...         yield doc['name'], doc

    >>> Person.by_name
    <ViewDefinition '_design/people/_view/by_name'>

    >>> print(Person.by_name.map_fun)
    def by_name(doc):
        yield doc['name'], doc
    """

    def __init__(self, design, map_fun, reduce_fun=None, name=None,
                 language='javascript', wrapper=DEFAULT, **defaults):
        """Initialize the view descriptor.

        :param design: the name of the design document
        :param map_fun: the map function code
        :param reduce_fun: the reduce function code (optional)
        :param name: the actual name of the view in the design document, if
                     it differs from the name the descriptor is assigned to
        :param language: the name of the language used
        :param wrapper: an optional callable that should be used to wrap the
                        result rows
        :param defaults: default query string parameters to apply
        """
        self.design = design
        self.name = name
        self.map_fun = map_fun
        self.reduce_fun = reduce_fun
        self.language = language
        self.wrapper = wrapper
        self.defaults = defaults

    @classmethod
    def define(cls, design, name=None, language='python', wrapper=DEFAULT,
               **defaults):
        """Factory method for use as a decorator (only suitable for Python
        view code).
        """
        def view_wrapped(fun):
            return cls(design, fun, language=language, wrapper=wrapper,
                       **defaults)
        return view_wrapped

    def __get__(self, instance, cls=None):
        if self.wrapper is DEFAULT:
            wrapper = cls._wrap_row
        else:
            wrapper = self.wrapper
        return ViewDefinition(self.design, self.name, self.map_fun,
                              self.reduce_fun, language=self.language,
                              wrapper=wrapper, **self.defaults)


class DocumentMeta(MappingMeta):

    def __new__(cls, name, bases, d):
        for attrname, attrval in d.items():
            if isinstance(attrval, ViewField):
                if not attrval.name:
                    attrval.name = attrname
        return MappingMeta.__new__(cls, name, bases, d)


DocumentMetaClass = DocumentMeta('DocumentMetaClass', (object,), {})


class Document(DocumentMetaClass, Mapping):

    def __init__(self, id=None, **values):
        Mapping.__init__(self, **values)
        if id is not None:
            self.id = id

    def __repr__(self):
        return '<%s %r@%r %r>' % (type(self).__name__, self.id, self.rev,
                                  dict([(k, v) for k, v in self._data.items()
                                        if k not in ('_id', '_rev')]))

    def _get_id(self):
        if hasattr(self._data, 'id'):  # When data is client.Document
            return self._data.id
        return self._data.get('_id')

    def _set_id(self, value):
        if self.id is not None:
            raise AttributeError('id can only be set on new documents')
        self._data['_id'] = value

    id = property(_get_id, _set_id, doc='The document ID')

    @property
    def rev(self):
        """The document revision.

        :rtype: basestring
        """
        if hasattr(self._data, 'rev'):  # When data is client.Document
            return self._data.rev
        return self._data.get('_rev')

    def items(self):
        """Return the fields as a list of ``(name, value)`` tuples.

        This method is provided to enable easy conversion to native dictionary
        objects, for example to allow use of `mapping.Document` instances with
        `client.Database.update`.

        >>> class Post(Document):
        ...     title = TextField()
        ...     author = TextField()
        >>> post = Post(id='foo-bar', title='Foo bar', author='Joe')
        >>> sorted(post.items())
        [('_id', 'foo-bar'), ('author', u'Joe'), ('title', u'Foo bar')]

        :return: a list of ``(name, value)`` tuples
        """
        retval = []
        if self.id is not None:
            retval.append(('_id', self.id))
            if self.rev is not None:
                retval.append(('_rev', self.rev))
        for name, value in self._data.items():
            if name not in ('_id', '_rev'):
                retval.append((name, value))
        return retval

    @classmethod
    def load(cls, db, id):
        """Load a specific document from the given database.

        :param db: the `Database` object to retrieve the document from
        :param id: the document ID
        :return: the `Document` instance, or `None` if no document with the
                 given ID was found
        """
        doc = db.get(id)
        if doc is None:
            return None
        return cls.wrap(doc)

    def store(self, db):
        """Store the document in the given database."""
        db.save(self._data)
        return self

    @classmethod
    def query(cls, db, map_fun, reduce_fun, language='javascript', **options):
        """Execute a CouchDB temporary view and map the result values back to
        objects of this mapping.

        Note that by default, any properties of the document that are not
        included in the values of the view will be treated as if they were
        missing from the document. If you want to load the full document for
        every row, set the ``include_docs`` option to ``True``.
        """
        return db.query(map_fun, reduce_fun=reduce_fun, language=language,
                        wrapper=cls._wrap_row, **options)

    @classmethod
    def view(cls, db, viewname, **options):
        """Execute a CouchDB named view and map the result values back to
        objects of this mapping.

        Note that by default, any properties of the document that are not
        included in the values of the view will be treated as if they were
        missing from the document. If you want to load the full document for
        every row, set the ``include_docs`` option to ``True``.
        """
        return db.view(viewname, wrapper=cls._wrap_row, **options)

    @classmethod
    def _wrap_row(cls, row):
        doc = row.get('doc')
        if doc is not None:
            return cls.wrap(doc)
        data = row['value']
        data['_id'] = row['id']
        if 'rev' in data:  # When data is client.Document
            data['_rev'] = data['rev']
        return cls.wrap(data)


class TextField(Field):
    """Mapping field for string values."""
    _to_python = util.utype


class FloatField(Field):
    """Mapping field for float values."""
    _to_python = float


class IntegerField(Field):
    """Mapping field for integer values."""
    _to_python = int


class LongField(Field):
    """Mapping field for long integer values."""
    _to_python = util.ltype


class BooleanField(Field):
    """Mapping field for boolean values."""
    _to_python = bool


class DecimalField(Field):
    """Mapping field for decimal values."""

    def _to_python(self, value):
        return Decimal(value)

    def _to_json(self, value):
        return util.utype(value)


class DateField(Field):
    """Mapping field for storing dates.

    >>> field = DateField()
    >>> field._to_python('2007-04-01')
    datetime.date(2007, 4, 1)
    >>> field._to_json(date(2007, 4, 1))
    '2007-04-01'
    >>> field._to_json(datetime(2007, 4, 1, 15, 30))
    '2007-04-01'
    """

    def _to_python(self, value):
        if isinstance(value, util.strbase):
            try:
                value = date(*strptime(value, '%Y-%m-%d')[:3])
            except ValueError:
                raise ValueError('Invalid ISO date %r' % value)
        return value

    def _to_json(self, value):
        if isinstance(value, datetime):
            value = value.date()
        return value.isoformat()


class DateTimeField(Field):
    """Mapping field for storing date/time values.

    >>> field = DateTimeField()
    >>> field._to_python('2007-04-01T15:30:00Z')
    datetime.datetime(2007, 4, 1, 15, 30)
    >>> field._to_python('2007-04-01T15:30:00.009876Z')
    datetime.datetime(2007, 4, 1, 15, 30, 0, 9876)
    >>> field._to_json(datetime(2007, 4, 1, 15, 30, 0))
    '2007-04-01T15:30:00Z'
    >>> field._to_json(datetime(2007, 4, 1, 15, 30, 0, 9876))
    '2007-04-01T15:30:00.009876Z'
    >>> field._to_json(date(2007, 4, 1))
    '2007-04-01T00:00:00Z'
    """

    def _to_python(self, value):
        if isinstance(value, util.strbase):
            try:
                split_value = value.split('.')  # strip out microseconds
                if len(split_value) == 1:  # No microseconds provided
                    value = split_value[0]
                    value = value.rstrip('Z')  # remove timezone separator
                    value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S')
                else:
                    value = value.rstrip('Z')
                    value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
            except ValueError:
                raise ValueError('Invalid ISO date/time %r' % value)
        return value

    def _to_json(self, value):
        if isinstance(value, struct_time):
            value = datetime.utcfromtimestamp(timegm(value))
        elif not isinstance(value, datetime):
            value = datetime.combine(value, time(0))
        return value.isoformat() + 'Z'


class TimeField(Field):
    """Mapping field for storing times.

    >>> field = TimeField()
    >>> field._to_python('15:30:00')
    datetime.time(15, 30)
    >>> field._to_json(time(15, 30))
    '15:30:00'
    >>> field._to_json(datetime(2007, 4, 1, 15, 30))
    '15:30:00'
    """

    def _to_python(self, value):
        if isinstance(value, util.strbase):
            try:
                value = value.split('.', 1)[0]  # strip out microseconds
                value = time(*strptime(value, '%H:%M:%S')[3:6])
            except ValueError:
                raise ValueError('Invalid ISO time %r' % value)
        return value

    def _to_json(self, value):
        if isinstance(value, datetime):
            value = value.time()
        return value.replace(microsecond=0).isoformat()


class DictField(Field):
    """Field type for nested dictionaries.

    >>> from couchdb import Server
    >>> server = Server()
    >>> db = server.create('python-tests')

    >>> class Post(Document):
    ...     title = TextField()
    ...     content = TextField()
    ...     author = DictField(Mapping.build(
    ...         name = TextField(),
    ...         email = TextField()
    ...     ))
    ...     extra = DictField()

    >>> post = Post(
    ...     title='Foo bar',
    ...     author=dict(name='John Doe',
    ...                 email='[email protected]'),
    ...     extra=dict(foo='bar'),
    ... )
    >>> post.store(db) #doctest: +ELLIPSIS
    <Post ...>
    >>> post = Post.load(db, post.id)
    >>> post.author.name
    u'John Doe'
    >>> post.author.email
    u'[email protected]'
    >>> post.extra
    {u'foo': u'bar'}

    >>> del server['python-tests']
    """

    def __init__(self, mapping=None, name=None, default=None):
        default = default or {}
        Field.__init__(self, name=name, default=lambda: default.copy())
        self.mapping = mapping

    def _to_python(self, value):
        if self.mapping is None:
            return value
        else:
            return self.mapping.wrap(value)

    def _to_json(self, value):
        if self.mapping is None:
            return value
        if not isinstance(value, Mapping):
            value = self.mapping(**value)
        return value.unwrap()


class ListField(Field):
    """Field type for sequences of other fields.

    >>> from couchdb import Server
    >>> server = Server()
    >>> db = server.create('python-tests')

    >>> class Post(Document):
    ...     title = TextField()
    ...     content = TextField()
    ...     pubdate = DateTimeField(default=datetime.now)
    ...     comments = ListField(DictField(Mapping.build(
    ...         author = TextField(),
    ...         content = TextField(),
    ...         time = DateTimeField()
    ...     )))

    >>> post = Post(title='Foo bar')
    >>> post.comments.append(author='myself', content='Bla bla',
    ...                      time=datetime.now())
    >>> len(post.comments)
    1
    >>> post.store(db) #doctest: +ELLIPSIS
    <Post ...>
    >>> post = Post.load(db, post.id)
    >>> comment = post.comments[0]
    >>> comment['author']
    u'myself'
    >>> comment['content']
    u'Bla bla'
    >>> comment['time'] #doctest: +ELLIPSIS
    u'...T...Z'

    >>> del server['python-tests']
    """

    def __init__(self, field, name=None, default=None):
        default = default or []
        Field.__init__(self, name=name, default=lambda: copy.copy(default))
        if type(field) is type:
            if issubclass(field, Field):
                field = field()
            elif issubclass(field, Mapping):
                field = DictField(field)
        self.field = field

    def _to_python(self, value):
        return self.Proxy(value, self.field)

    def _to_json(self, value):
        return [self.field._to_json(item) for item in value]

    class Proxy(list):

        def __init__(self, list, field):
            self.list = list
            self.field = field

        def __lt__(self, other):
            return self.list < other

        def __le__(self, other):
            return self.list <= other

        def __eq__(self, other):
            return self.list == other

        def __ne__(self, other):
            return self.list != other

        def __gt__(self, other):
            return self.list > other

        def __ge__(self, other):
            return self.list >= other

        def __repr__(self):
            return repr(self.list)

        def __str__(self):
            return str(self.list)

        def __unicode__(self):
            return util.utype(self.list)

        def __delitem__(self, index):
            if isinstance(index, slice):
                self.__delslice__(index.start, index.stop)
            else:
                del self.list[index]

        def __getitem__(self, index):
            if isinstance(index, slice):
                return self.__getslice__(index.start, index.stop)
            return self.field._to_python(self.list[index])

        def __setitem__(self, index, value):
            if isinstance(index, slice):
                self.__setslice__(index.start, index.stop, value)
            else:
                self.list[index] = self.field._to_json(value)

        def __delslice__(self, i, j):
            del self.list[i:j]

        def __getslice__(self, i, j):
            return ListField.Proxy(self.list[i:j], self.field)

        def __setslice__(self, i, j, seq):
            self.list[i:j] = (self.field._to_json(v) for v in seq)

        def __contains__(self, value):
            for item in self.list:
                if self.field._to_python(item) == value:
                    return True
            return False

        def __iter__(self):
            for index in range(len(self)):
                yield self[index]

        def __len__(self):
            return len(self.list)

        def __nonzero__(self):
            return bool(self.list)

        def append(self, *args, **kwargs):
            if args or not isinstance(self.field, DictField):
                if len(args) != 1:
                    raise TypeError('append() takes exactly one argument '
                                    '(%s given)' % len(args))
                value = args[0]
            else:
                value = kwargs
            self.list.append(self.field._to_json(value))

        def count(self, value):
            return [i for i in self].count(value)

        def extend(self, list):
            for item in list:
                self.append(item)

        def index(self, value):
            return self.list.index(self.field._to_json(value))

        def insert(self, idx, *args, **kwargs):
            if args or not isinstance(self.field, DictField):
                if len(args) != 1:
                    raise TypeError('insert() takes exactly 2 arguments '
                                    '(%s given)' % len(args))
                value = args[0]
            else:
                value = kwargs
            self.list.insert(idx, self.field._to_json(value))

        def remove(self, value):
            return self.list.remove(self.field._to_json(value))

        def pop(self, *args):
            return self.field._to_python(self.list.pop(*args))
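# --- Usage sketch (illustrative; not part of the original module) ---
# Combines several of the field types above in a single Document subclass.
# Requires a CouchDB server reachable at the default location; "python-demo"
# is a placeholder database name made up for this example.
if __name__ == "__main__":  # pragma: no cover
    from couchdb import Server

    class Book(Document):
        title = TextField()
        published = DateField()
        tags = ListField(TextField())

    db = Server().create('python-demo')
    book = Book(title='Writing CouchDB views', published=date(2010, 1, 1),
                tags=['couchdb', 'mapping'])
    book.store(db)  # persists via db.save(); assigns _id/_rev
    print(Book.load(db, book.id).title)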
PypiClean
/Captcha-Impulse-0.0.9.tar.gz/Captcha-Impulse-0.0.9/src/impulse/yolov5/utils/general.py
import contextlib
import glob
import logging
import math
import os
import platform
import random
import re
import shutil
import signal
import time
import urllib
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from subprocess import check_output
from zipfile import ZipFile

import cv2
import numpy as np
import pandas as pd
import pkg_resources as pkg
import torch
import torchvision
import yaml

from utils.downloads import gsutil_getsize
from utils.metrics import box_iou, fitness

# Settings
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLOv5 multiprocessing threads
VERBOSE = str(os.getenv('VERBOSE', True)).lower() == 'true'  # global verbose mode

torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS)  # NumExpr max threads


def is_kaggle():
    # Is environment a Kaggle Notebook?
    try:
        assert os.environ.get('PWD') == '/kaggle/working'
        assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com'
        return True
    except AssertionError:
        return False


def set_logging(name=None, verbose=VERBOSE):
    # Sets level and returns logger
    if is_kaggle():
        for h in logging.root.handlers:
            logging.root.removeHandler(h)  # remove all handlers associated with the root logger object
    rank = int(os.getenv('RANK', -1))  # rank in world for Multi-GPU trainings
    logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING)
    return logging.getLogger(name)


LOGGER = set_logging(__name__)  # define globally (used in train.py, val.py, detect.py, etc.)


class Profile(contextlib.ContextDecorator):
    # Usage: @Profile() decorator or 'with Profile():' context manager
    def __enter__(self):
        self.start = time.time()

    def __exit__(self, type, value, traceback):
        print(f'Profile results: {time.time() - self.start:.5f}s')


class Timeout(contextlib.ContextDecorator):
    # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager
    def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):
        self.seconds = int(seconds)
        self.timeout_message = timeout_msg
        self.suppress = bool(suppress_timeout_errors)

    def _timeout_handler(self, signum, frame):
        raise TimeoutError(self.timeout_message)

    def __enter__(self):
        signal.signal(signal.SIGALRM, self._timeout_handler)  # Set handler for SIGALRM
        signal.alarm(self.seconds)  # start countdown for SIGALRM to be raised

    def __exit__(self, exc_type, exc_val, exc_tb):
        signal.alarm(0)  # Cancel SIGALRM if it's scheduled
        if self.suppress and exc_type is TimeoutError:  # Suppress TimeoutError
            return True


class WorkingDirectory(contextlib.ContextDecorator):
    # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager
    def __init__(self, new_dir):
        self.dir = new_dir  # new dir
        self.cwd = Path.cwd().resolve()  # current dir

    def __enter__(self):
        os.chdir(self.dir)

    def __exit__(self, exc_type, exc_val, exc_tb):
        os.chdir(self.cwd)


def try_except(func):
    # try-except function. Usage: @try_except decorator
    def handler(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as e:
            print(e)

    return handler


def methods(instance):
    # Get class/instance methods
    return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")]


def print_args(name, opt):
    # Print argparser arguments
    LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
    # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible
    import torch.backends.cudnn as cudnn
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False)


def intersect_dicts(da, db, exclude=()):
    # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values
    return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):
    # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required.
    env = os.getenv(env_var)
    if env:
        path = Path(env)  # use environment variable
    else:
        cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'}  # 3 OS dirs
        path = Path.home() / cfg.get(platform.system(), '')  # OS-specific config dir
    path = (path if is_writeable(path) else Path('/tmp')) / dir  # GCP and AWS lambda fix, only /tmp is writeable
    path.mkdir(exist_ok=True)  # make if required
    return path


def is_writeable(dir, test=False):
    # Return True if directory has write permissions, test opening a file with write permissions if test=True
    if test:  # method 1
        file = Path(dir) / 'tmp.txt'
        try:
            with open(file, 'w'):  # open file with write permissions
                pass
            file.unlink()  # remove file
            return True
        except OSError:
            return False
    else:  # method 2
        return os.access(dir, os.R_OK)  # possible issues on Windows


def is_docker():
    # Is environment a Docker container?
    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()


def is_colab():
    # Is environment a Google Colab instance?
    try:
        import google.colab
        return True
    except ImportError:
        return False


def is_pip():
    # Is file in a pip package?
    return 'site-packages' in Path(__file__).resolve().parts


def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7)
    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)


def is_chinese(s='人工智能'):
    # Is string composed of any Chinese characters?
    return re.search('[\u4e00-\u9fff]', s)


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


def file_size(path):
    # Return file/dir size (MB)
    path = Path(path)
    if path.is_file():
        return path.stat().st_size / 1E6
    elif path.is_dir():
        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6
    else:
        return 0.0


def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
        return True
    except OSError:
        return False


@try_except
@WorkingDirectory(ROOT)
def check_git_status():
    # Recommend 'git pull' if code is out of date
    msg = ', for updates see https://github.com/ultralytics/yolov5'
    print(colorstr('github: '), end='')
    assert Path('.git').exists(), 'skipping check (not a git repository)' + msg
    assert not is_docker(), 'skipping check (Docker image)' + msg
    assert check_online(), 'skipping check (offline)' + msg

    cmd = 'git fetch && git config --get remote.origin.url'
    url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git')  # git fetch
    branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
    n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
    if n > 0:
        s = f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update."
    else:
        s = f'up to date with {url} ✅'
    print(emojis(s))  # emoji-safe


def check_python(minimum='3.6.2'):
    # Check current python version vs. required python version
    check_version(platform.python_version(), minimum, name='Python ', hard=True)


def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
    # Check version vs. required version
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)  # bool
    s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed'  # string
    if hard:
        assert result, s  # assert min requirements met
    if verbose and not result:
        LOGGER.warning(s)
    return result


def check_img_size(imgsz, s=32, floor=0):
    # Verify image size is a multiple of stride s in each dimension
    if isinstance(imgsz, int):  # integer i.e. img_size=640
        new_size = max(make_divisible(imgsz, int(s)), floor)
    else:  # list i.e. img_size=[640, 480]
        new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]
    # if new_size != imgsz:
    #     print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')
    return new_size


def check_imshow():
    # Check if environment supports image displays
    try:
        assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'
        assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
        return False


def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''):
    # Check file(s) for acceptable suffix
    if file and suffix:
        if isinstance(suffix, str):
            suffix = [suffix]
        for f in file if isinstance(file, (list, tuple)) else [file]:
            s = Path(f).suffix.lower()  # file suffix
            if len(s):
                assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"


def check_yaml(file, suffix=('.yaml', '.yml')):
    # Search/download YAML file (if necessary) and return path, checking suffix
    return check_file(file, suffix)


def check_file(file, suffix='', add_suffix=False):
    # Search/download file (if necessary) and return path
    check_suffix(file, suffix)  # optional
    file = str(file)  # convert to str()
    file += "" if not add_suffix else ".png"
    if Path(file).is_file() or file == '':  # exists
        return file
    elif file.startswith(('http:/', 'https:/')):  # download
        url = file
        if add_suffix:
            url = url[:-4]
        file = Path(urllib.parse.unquote(file).split('?')[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth
        if Path(file).is_file():
            print(f'Found {url} locally at {file}')  # file already exists
        else:
            LOGGER.info(f'Downloading {url} to {file}...')
            torch.hub.download_url_to_file(url, file, progress=False)
            assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check
        return file
    else:  # search
        files = []
        for d in 'data', 'models', 'utils':  # search directories
            files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))  # find file
        assert len(files), f'File not found: {file}'  # assert file was found
        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
        return files[0]  # return file


def check_dataset(data, autodownload=True):
    # Download and/or unzip dataset if not found locally
    # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip

    # Download (optional)
    extract_dir = ''
    if isinstance(data, (str, Path)) and str(data).endswith('.zip'):  # i.e. gs://bucket/dir/coco128.zip
        download(data, dir='../datasets', unzip=True, delete=False, curl=False, threads=1)
        data = next((Path('../datasets') / Path(data).stem).rglob('*.yaml'))
        extract_dir, autodownload = data.parent, False

    # Read yaml (optional)
    if isinstance(data, (str, Path)):
        with open(data, errors='ignore') as f:
            data = yaml.safe_load(f)  # dictionary

    # Parse yaml
    path = extract_dir or Path(data.get('path') or '')  # optional 'path' default to '.'
    for k in 'train', 'val', 'test':
        if data.get(k):  # prepend path
            data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]]

    assert 'nc' in data, "Dataset 'nc' key missing."
    if 'names' not in data:
        data['names'] = [f'class{i}' for i in range(data['nc'])]  # assign class names if missing
    train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))
    if val:
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and autodownload:  # download script
                root = path.parent if 'path' in data else '..'  # unzip directory i.e. '../'
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    LOGGER.info(f'Downloading {s} to {f}...')
                    torch.hub.download_url_to_file(s, f, progress=False)
                    Path(root).mkdir(parents=True, exist_ok=True)  # create root
                    ZipFile(f).extractall(path=root)  # unzip
                    Path(f).unlink()  # remove zip
                    r = None  # success
                elif s.startswith('bash '):  # bash script
                    print(f'Running {s} ...')
                    r = os.system(s)
                else:  # python script
                    r = exec(s, {'yaml': data})  # return None
                print(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n")
            else:
                raise Exception('Dataset not found.')

    return data  # dictionary


def url2file(url):
    # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt
    url = str(Path(url)).replace(':/', '://')  # Pathlib turns :// -> :/
    file = Path(urllib.parse.unquote(url)).name.split('?')[0]  # '%2F' to '/', split https://url.com/file.txt?auth
    return file


def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1):
    # Multi-threaded file download and unzip function, used in data.yaml for autodownload
    def download_one(url, dir):
        # Download 1 file
        f = dir / Path(url).name  # filename
        if Path(url).is_file():  # exists in current path
            Path(url).rename(f)  # move to dir
        elif not f.exists():
            LOGGER.info(f'Downloading {url} to {f}...')
            if curl:
                os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -")  # curl download, retry and resume on fail
            else:
                torch.hub.download_url_to_file(url, f, progress=True)  # torch download
        if unzip and f.suffix in ('.zip', '.gz'):
            print(f'Unzipping {f}...')
            if f.suffix == '.zip':
                ZipFile(f).extractall(path=dir)  # unzip
            elif f.suffix == '.gz':
                os.system(f'tar xfz {f} --directory {f.parent}')  # unzip
            if delete:
                f.unlink()  # remove zip

    dir = Path(dir)
    dir.mkdir(parents=True, exist_ok=True)  # make directory
    if threads > 1:
        pool = ThreadPool(threads)
        pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))  # multi-threaded
        pool.close()
        pool.join()
    else:
        for u in [url] if isinstance(url, (str, Path)) else url:
            download_one(u, dir)


def make_divisible(x, divisor):
    # Returns nearest x divisible by divisor
    if isinstance(divisor, torch.Tensor):
        divisor = int(divisor.max())  # to int
    return math.ceil(x / divisor) * divisor


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(np.int)  # labels = [class xywh]
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33,
         34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
         62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y


def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right
    if clip:
        clip_coords(x, (h - eps, w - eps))  # warning: inplace clip
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w  # x center
    y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h  # y center
    y[:, 2] = (x[:, 2] - x[:, 0]) / w  # width
    y[:, 3] = (x[:, 3] - x[:, 1]) / h  # height
    return y


def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segments into pixel segments, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * x[:, 0] + padw  # top left x
    y[:, 1] = h * x[:, 1] + padh  # top left y
    return y


def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y, = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy


def segments2boxes(segments):
    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy
    return xyxy2xywh(np.array(boxes))  # cls, xywh


def resample_segments(segments, n=1000):
    # Up-sample an (n,2) segment
    for i, s in enumerate(segments):
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy
    return segments


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    min_wh, max_wh = 2, 7680  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output


def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")


def print_mutation(results, hyp, save_dir, bucket):
    evolve_csv, results_csv, evolve_yaml = save_dir / 'evolve.csv', save_dir / 'results.csv', save_dir / 'hyp_evolve.yaml'
    keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
            'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys())  # [results + hyps]
    keys = tuple(x.strip() for x in keys)
    vals = results + tuple(hyp.values())
    n = len(keys)

    # Download (optional)
    if bucket:
        url = f'gs://{bucket}/evolve.csv'
        if gsutil_getsize(url) > (os.path.getsize(evolve_csv) if os.path.exists(evolve_csv) else 0):
            os.system(f'gsutil cp {url} {save_dir}')  # download evolve.csv if larger than local

    # Log to evolve.csv
    s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n')  # add header
    with open(evolve_csv, 'a') as f:
        f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n')

    # Print to screen
    print(colorstr('evolve: ') + ', '.join(f'{x.strip():>20s}' for x in keys))
    print(colorstr('evolve: ') + ', '.join(f'{x:20.5g}' for x in vals), end='\n\n\n')

    # Save yaml
    with open(evolve_yaml, 'w') as f:
        data = pd.read_csv(evolve_csv)
        data = data.rename(columns=lambda x: x.strip())  # strip keys
        i = np.argmax(fitness(data.values[:, :7]))  #
        f.write('# YOLOv5 Hyperparameter Evolution Results\n' +
                f'# Best generation: {i}\n' +
                f'# Last generation: {len(data) - 1}\n' +
                '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' +
                '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n')
        yaml.safe_dump(hyp, f, sort_keys=False)

    if bucket:
        os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}')  # upload


def apply_classifier(x, model, img, im0):
    # Apply a second stage classifier to YOLO outputs
    # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval()
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('example%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=False, sep='', mkdir=False):
    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        path = Path(f"{path}{sep}{n}{suffix}")  # increment path
    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory
    return path


# Variables
NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns  # terminal window size for tqdm
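
# A quick sanity sketch (not part of the upstream module) for the box-conversion
# helpers above; the sample box is made up for illustration and the snippet
# assumes it runs in this module's namespace so xyxy2xywh, xywh2xyxy and
# make_divisible are in scope.
import numpy as np

boxes_xyxy = np.array([[10.0, 20.0, 50.0, 80.0]])  # one box as x1, y1, x2, y2
boxes_xywh = xyxy2xywh(boxes_xyxy)                 # -> center x/y, width, height
assert np.allclose(boxes_xywh, [[30.0, 50.0, 40.0, 60.0]])
assert np.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)  # lossless round trip

assert make_divisible(641, 32) == 672  # stride rounding used by check_img_size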
PypiClean
/MetaGram-2.0.2.tar.gz/MetaGram-2.0.2/pyrogram/crypto/aes.py
import logging

log = logging.getLogger(__name__)

try:
    import tgcrypto

    log.info("Using TgCrypto")

    def ige256_encrypt(data: bytes, key: bytes, iv: bytes) -> bytes:
        return tgcrypto.ige256_encrypt(data, key, iv)

    def ige256_decrypt(data: bytes, key: bytes, iv: bytes) -> bytes:
        return tgcrypto.ige256_decrypt(data, key, iv)

    def ctr256_encrypt(data: bytes, key: bytes, iv: bytearray, state: bytearray = None) -> bytes:
        return tgcrypto.ctr256_encrypt(data, key, iv, state or bytearray(1))

    def ctr256_decrypt(data: bytes, key: bytes, iv: bytearray, state: bytearray = None) -> bytes:
        return tgcrypto.ctr256_decrypt(data, key, iv, state or bytearray(1))

    def xor(a: bytes, b: bytes) -> bytes:
        return int.to_bytes(
            int.from_bytes(a, "big") ^ int.from_bytes(b, "big"),
            len(a),
            "big",
        )
except ImportError:
    import pyaes

    log.warning(
        "TgCrypto is missing! "
        "Pyrogram will work the same, but at a much slower speed. "
        "More info: https://docs.pyrogram.org/topics/speedups"
    )

    def ige256_encrypt(data: bytes, key: bytes, iv: bytes) -> bytes:
        return ige(data, key, iv, True)

    def ige256_decrypt(data: bytes, key: bytes, iv: bytes) -> bytes:
        return ige(data, key, iv, False)

    def ctr256_encrypt(data: bytes, key: bytes, iv: bytearray, state: bytearray = None) -> bytes:
        return ctr(data, key, iv, state or bytearray(1))

    def ctr256_decrypt(data: bytes, key: bytes, iv: bytearray, state: bytearray = None) -> bytes:
        return ctr(data, key, iv, state or bytearray(1))

    def xor(a: bytes, b: bytes) -> bytes:
        return int.to_bytes(
            int.from_bytes(a, "big") ^ int.from_bytes(b, "big"),
            len(a),
            "big",
        )

    def ige(data: bytes, key: bytes, iv: bytes, encrypt: bool) -> bytes:
        cipher = pyaes.AES(key)

        iv_1 = iv[:16]
        iv_2 = iv[16:]

        data = [data[i: i + 16] for i in range(0, len(data), 16)]

        if encrypt:
            for i, chunk in enumerate(data):
                iv_1 = data[i] = xor(cipher.encrypt(xor(chunk, iv_1)), iv_2)
                iv_2 = chunk
        else:
            for i, chunk in enumerate(data):
                iv_2 = data[i] = xor(cipher.decrypt(xor(chunk, iv_2)), iv_1)
                iv_1 = chunk

        return b"".join(data)

    def ctr(data: bytes, key: bytes, iv: bytearray, state: bytearray) -> bytes:
        cipher = pyaes.AES(key)

        out = bytearray(data)
        chunk = cipher.encrypt(iv)

        for i in range(0, len(data), 16):
            for j in range(0, min(len(data) - i, 16)):
                out[i + j] ^= chunk[state[0]]

                state[0] += 1

                if state[0] >= 16:
                    state[0] = 0

                if state[0] == 0:
                    for k in range(15, -1, -1):
                        try:
                            iv[k] += 1
                            break
                        except ValueError:
                            iv[k] = 0

                    chunk = cipher.encrypt(iv)

        return out
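
# A round-trip sketch for the IGE helpers above (illustrative, not part of the
# module): AES-256 IGE takes a 32-byte key and a 32-byte IV, and the plaintext
# length must be a multiple of the 16-byte block size.
import os

key = os.urandom(32)
iv = os.urandom(32)
plaintext = b"sixteen byte msg" * 4  # 64 bytes, block-aligned

ciphertext = ige256_encrypt(plaintext, key, iv)
assert ige256_decrypt(ciphertext, key, iv) == plaintext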
PypiClean
/JWaves-0.0.1.tar.gz/JWaves-0.0.1/src/S02_firstTestsB.py
import numpy as np
from scipy.io import loadmat
import matplotlib.pyplot as plt
import sys
sys.path.append('.')
from RPA import Lattice, System, Units
from MagneticFormFactor import formFactor
from scipy.ndimage import gaussian_filter
import time

start = time.time()

#########
lat = [10.088, 11.943, 3.42]  #
T = 0.1  # in K
Hc = 0.25
H = [0, 0, Hc]  # in [T]

J1 = 0  # -0.1484/3.9892**2#
J2 = 0  # -0.1023/3.9892**2#
facO21 = 0  #

J1S1 = np.zeros((8, 8))
J1S1[:3, :3] = J1 * np.eye(3)
J2S1 = np.zeros((8, 8))
J2S1[:3, :3] = J2 * np.eye(3)

# 'Jx' 'Jy' 'Jz' 'O20' 'O2+1' 'O2-1' 'O2+2' 'O2-2'
#  1    2    3    4     5      6      7      8
J2S1[4, 4] = facO21

##
epsilon = 0.05
omega = np.arange(0, 9, epsilon / 3)  #
wid = 0.1  #
ElasticThreshold = 1e-2

Qz = np.arange(0, 2, 0.075)
Qy = np.zeros_like(Qz)  # ((len(Qz)))
Qx = np.zeros_like(Qz)
QRLU = np.asarray([Qx, Qy, Qz])

positions = np.asarray([[0.4242, 0.1110, 0.25],
                        [0.5758, 0.8890, 0.75]])

lattice = Lattice(S=[15 / 2, 15 / 2], g=[4 / 3, 4 / 3], active=[1, 1],
                  positions=positions, label=['Dy1', 'Dy1'], site=[1, 1],
                  lattice=[10.0884, 11.9427, 3.4289, 90, 90, 90])

S = System(temperature=T, magneticField=H, lattice=lattice)

S.lattice.generateCouplings(maxDistance=3.51)

distances = [3.5082, 3.4289]
Js = [J1S1, J2S1]

S.lattice.addExchangeInteractions(Js, distances)

S.operators = loadmat(r'C:\Users\lass_j\Documents\Software\RPA\RPA for Simon\operators\operatorsSHO_sxtal0p45.mat')['operator']
S.operators = np.asarray([S.operators, S.operators])
S.energies = np.array([[0, 0.626, 2.12, 3.07, 5.36, 8.22]])

# InitialDistributionSite1_Ext
sizeS = 3.9892
doubling = False  # Along c only
config = 1  # not sure
Ncell = 1
nExt = [1, 1, Ncell]

numOperators = S.operators.shape[-1]

site1 = np.zeros((numOperators, len(S.lattice.r)))
site1[2, 0] = sizeS
site1[2, 1] = site1[2, 0] * config

fc = site1
fullConfiguration = np.repeat(fc, Ncell, axis=-1)
spins = np.repeat(site1[:3], Ncell, axis=-1)

if doubling:  # add AFM along c
    tt = np.ones_like(fullConfiguration)
    tt[:, -2::4] = -tt[:, -2::4]
    tt[:, -1::4] = -tt[:, -1::4]
    fullConfiguration = fullConfiguration * tt

S.lattice.NCell = int(np.product(nExt))
S.lattice.nExt = nExt

fig, Ax = plt.subplots(nrows=2, ncols=2, figsize=(14, 10))
Ax = Ax.flatten()

for field, ax in zip([0.0, 0.1, 0.5, 1.0], Ax):
    S.magneticField = np.array([0.0, field, Hc])
    S.fullConfiguration = fullConfiguration
    S.solveSelfConsistency()
    print(np.round(np.real(S.fullConfiguration), 4))
    S.calculateChi0(omega)

    Y = [np.imag(S.Chi0_inelastic[i, i, 0, :]) for i in range(3)]
    Y.append(np.sum(Y, axis=0))
    Y = np.asarray(Y)

    for y, c, title in zip(Y, ['b', 'r', 'g', 'k--'], ['Jx', 'Jy', 'Jz', 'Jtot']):
        ax.plot(omega, y, c, label=title)

    ax.set_xlabel('Energy [meV]')
    ax.set_ylabel('Imag(X_0 inelastic)')
    ax.set_title('Magnetic Field = (' + ', '.join(['{:}'.format(x) for x in S.magneticField]) + ') [T]')
    ax.legend()

fig.tight_layout()

##
hh = np.arange(0, 2, 0.05)
sqw = np.zeros((len(hh), len(omega)))
for hi, h in enumerate(hh):
    S.magneticField = np.array([0, h, Hc])
    S.solveSelfConsistency()
    S.calculateChi0(omega)
    sqw[hi, :] = np.imag(np.sum(S.Chi0_inelastic[[0, 1, 2], [0, 1, 2], 0, :], axis=0).T)

fig2, ax2 = plt.subplots()
p = ax2.pcolormesh(hh, omega, sqw.T, shading='auto')  # ;shading flat
ax2.set_xlabel('H (T)')
ax2.set_ylabel('Energy (meV)')
p.set_clim(0, 500)
fig2.colorbar(p)
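
# The script imports gaussian_filter but never applies it; if energy broadening
# of the intensity map is wanted before plotting, a minimal sketch (the sigma
# value is illustrative, expressed in omega bins, and is not taken from the
# original script):
sqw_smooth = gaussian_filter(sqw, sigma=(0, 2.0))  # broaden along omega only
ax2.pcolormesh(hh, omega, sqw_smooth.T, shading='auto')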
PypiClean
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/toolkits/feature_engineering/_numeric_imputer.py
import graphlab as _gl

# Toolkit utils.
from graphlab.toolkits.feature_engineering._feature_engineering import Transformer
from graphlab.toolkits._model import _get_default_options_wrapper
from graphlab.toolkits._internal_utils import _toolkit_repr_print
from graphlab.toolkits._internal_utils import _precomputed_field
from graphlab.util import _raise_error_if_not_of_type

# Feature engineering utils
from . import _internal_utils
from ._doc_utils import republish_docs

_fit_examples_doc = '''
# Create the data
>>> sf = graphlab.SFrame({'a' : [1,2,3,4,5], 'b' : [2,3,4,2,3]})

# Create the imputer for the features ['a', 'b'].
>>> imputer = graphlab.feature_engineering.NumericImputer(
        features = ['a', 'b'], strategy = 'mean')

# Learn the sufficient stats required for imputation of each column.
>>> imputer = imputer.fit(sf)

# Return the list of mean for each of the columns.
>>> imputer['means']
Columns:
    a   float
    b   float

Rows: 1

Data:
+-----+-----+
|  a  |  b  |
+-----+-----+
| 3.0 | 2.8 |
+-----+-----+
[1 rows x 2 columns]
'''

_fit_transform_examples_doc = '''
# Create the data
>>> sf = graphlab.SFrame({'a' : [1,2,None,4,5], 'b' : [2,3,None,5,6]})

# Create the imputer for the features ['a', 'b'].
>>> imputer = graphlab.feature_engineering.NumericImputer(
        features = ['a', 'b'])

# Fit and transform on the same data.
>>> transformed_sf = imputer.fit_transform(sf)
Columns:
    a   float
    b   float

Rows: 5

Data:
+-----+-----+
|  a  |  b  |
+-----+-----+
| 1.0 | 2.0 |
| 2.0 | 3.0 |
| 3.0 | 4.0 |
| 4.0 | 5.0 |
| 5.0 | 6.0 |
+-----+-----+
[5 rows x 2 columns]
'''

_transform_examples_doc = '''
# Integer/Float columns
# ----------------------------------------------------------------------
# Create the data
>>> sf = graphlab.SFrame({'a' : [1,2,4,5], 'b' : [2,3,5,6]})

# Create the imputer for the features ['a', 'b'].
>>> imputer = graphlab.feature_engineering.NumericImputer(
        features = ['a', 'b']).fit(sf)

# Impute the missing values in new data.
>>> sf_new = graphlab.SFrame({'a' : [1,2,None,4,5], 'b' : [2,3,None,5,6]})
>>> transformed_sf = imputer.transform(sf_new)
Columns:
    a   float
    b   float

Rows: 5

Data:
+-----+-----+
|  a  |  b  |
+-----+-----+
| 1.0 | 2.0 |
| 2.0 | 3.0 |
| 3.0 | 4.0 |
| 4.0 | 5.0 |
| 5.0 | 6.0 |
+-----+-----+
[5 rows x 2 columns]

# Lists can contain numeric and None values.
# ----------------------------------------------------------------------
>>> sf = graphlab.SFrame({'lst': [[1, 2], [2, 3], [3, 4], [5, 6], [6, 7]]})

# Construct and fit an imputer for the column ['lst'].
>>> from graphlab.toolkits.feature_engineering import NumericImputer
>>> imputer = graphlab.feature_engineering.create(sf,
        NumericImputer(features = ['lst']))

# Impute the missing values in the new data.
>>> new_sf = graphlab.SFrame({'lst': [[1, 2], [2, 3], [3, 4],
                                      [None, None], [5, 6], [6, 7]]})
>>> transformed_sf = imputer.transform(new_sf)
Columns:
    lst list

Rows: 6

Data:
+------------+
|    lst     |
+------------+
|   [1, 2]   |
|   [2, 3]   |
|   [3, 4]   |
| [3.4, 4.4] |
|   [5, 6]   |
|   [6, 7]   |
+------------+
[6 rows x 1 columns]

# Dictionaries (Assumes sparse data format)
# ----------------------------------------------------------------------
# Construct and fit an imputer for the column ['dict'].
>>> from graphlab.toolkits.feature_engineering import NumericImputer
>>> sf = graphlab.SFrame({'dict': [{'a':1, 'b': 2, 'c': 3},
                                   {'a':0, 'b': 0, 'c': 0},
                                   {'b':4, 'c': 0, 'd': 6}]})
>>> imputer = graphlab.toolkits.feature_engineering.create(sf,
        NumericImputer(features = ['dict']))

# Impute the missing values for the new data.
>>> sf = graphlab.SFrame({'dict': [{'a':1, 'b': 2, 'c': 3},
                                   None,
                                   {'b':4, 'c': None, 'd': 6}]})
>>> transformed_sf = imputer.transform(sf)
Columns:
    dict    dict

Rows: 3

Data:
+-------------------------------+
|              dict             |
+-------------------------------+
|    {'a': 1, 'c': 3, 'b': 2}   |
| {'a': 0.3333333333333333, ... |
|  {'c': 1.0, 'b': 4, 'd': 6}   |
+-------------------------------+
[3 rows x 1 columns]
'''


@republish_docs
class NumericImputer(Transformer):
    '''
    Impute missing values with feature means.

    Input columns to the NumericImputer must be of type *int*, *float*,
    *dict*, *list*, or *array.array*. For each column in the input, the
    transformed output is a column where the input is retained as is if:

     * there is no missing value.

    Inputs that do not satisfy the above are set to the mean value of that
    feature.

    The behavior for different input data column types is as follows
    (see :func:`~graphlab.feature_engineering.NumericImputer.transform`
    for examples):

    * **float** : If there is a missing value, it is replaced with the mean
      of that column.

    * **int** : Behaves the same way as *float*.

    * **list** : Each index of the list is treated as a feature column, and
      missing values are replaced with per-feature means. This is the same as
      unpacking, computing the mean, and re-packing. All elements must be of
      type *float*, *int*, or *None*. See :func:`~graphlab.SFrame.pack_columns`
      for more information.

    * **array** : Same behavior as *list*

    * **dict** : Same behavior as *list*, except keys not present in a
      particular row are implicitly interpreted as having the value 0. This
      makes the *dict* type a sparse representation of a vector.

    Parameters
    ----------
    features : list[str] | str | None, optional
        Name(s) of feature column(s) to be transformed. If set to None, then
        all feature columns are used.

    excluded_features : list[str] | str | None, optional
        Name(s) of feature columns in the input dataset to be ignored. Either
        `excluded_features` or `features` can be passed, but not both.

    strategy : 'auto'|'mean', optional
        The strategy with which to perform imputation. Currently can be 'auto'
        or 'mean'. Both currently perform mean imputation.

    output_column_prefix : str, optional
        The prefix to use for the column name of each transformed column.
        When provided, the transformation will add columns to the input data,
        where the new name is "`output_column_prefix`.original_column_name".
        If `output_column_prefix=None` (default), then the output column name
        is the same as the original feature column name.

    Returns
    -------
    out : NumericImputer
        A NumericImputer object which is initialized with the defined
        parameters.

    See Also
    --------
    graphlab.toolkits.feature_engineering._numeric_imputer.NumericImputer
    graphlab.toolkits.feature_engineering.create

    Notes
    -----
    - If the SFrame to be transformed already contains a column with the
      designated output column name, then that column will be replaced with
      the new output. In particular, this means that
      `output_column_prefix=None` will overwrite the original feature columns.

    Examples
    --------
    .. sourcecode:: python

        # Create data.
        >>> sf = graphlab.SFrame({'a': [1,3], 'b' : [2,4]})

        # Create a transformer.
        >>> from graphlab.toolkits.feature_engineering import NumericImputer
        >>> imputer = graphlab.feature_engineering.create(sf,
                NumericImputer(features = ['a', 'b'], strategy = 'mean'))

        # Transform the data.
        >>> new_sf = graphlab.SFrame({'a': [1,None,3], 'b' : [2, None,4]})
        >>> transformed_sf = imputer.transform(new_sf)

        # Save the transformer.
        >>> imputer.save('save-path')

        # Return the means.
        >>> imputer['means']
        Columns:
            a   float
            b   float

        Rows: 1

        Data:
        +-----+-----+
        |  a  |  b  |
        +-----+-----+
        | 2.0 | 3.0 |
        +-----+-----+
        [1 rows x 2 columns]
    '''

    # Doc strings
    _fit_examples_doc = _fit_examples_doc
    _transform_examples_doc = _transform_examples_doc
    _fit_transform_examples_doc = _fit_transform_examples_doc

    # Default options
    get_default_options = staticmethod(_get_default_options_wrapper(
        '_MeanImputer', 'toolkits.feature_engineering._mean_imputer',
        'MeanImputer', True))

    def __init__(self, features=None, excluded_features=None, strategy='auto',
                 output_column_prefix=None):

        # Process and make a copy of the features, exclude.
        _features, _exclude = _internal_utils.process_features(
            features, excluded_features)

        # Type checking
        _raise_error_if_not_of_type(strategy, [str])

        # Set up options
        opts = {
            'strategy': strategy,
            'output_column_prefix': output_column_prefix
        }
        if _exclude:
            opts['exclude'] = True
            opts['features'] = _exclude
        else:
            opts['exclude'] = False
            opts['features'] = _features

        # Initialize object
        proxy = _gl.extensions._MeanImputer()
        proxy.init_transformer(opts)
        super(NumericImputer, self).__init__(proxy, self.__class__)

    def _get_summary_struct(self):
        _features = _precomputed_field(
            _internal_utils.pretty_print_list(self.get('features')))
        _exclude = _precomputed_field(
            _internal_utils.pretty_print_list(self.get('excluded_features')))
        fields = [
            ("Features", _features),
            ("Excluded features", _exclude),
        ]
        section_titles = ['Model fields']
        return ([fields], section_titles)

    def __repr__(self):
        (sections, section_titles) = self._get_summary_struct()
        return _toolkit_repr_print(self, sections, section_titles, 30)

    @classmethod
    def _get_instance_and_data(cls):
        sf = _gl.SFrame({'a': [1, 2, 3], 'b': [2, 3, 4]})
        imputer = _gl.feature_engineering.NumericImputer(
            features=['a', 'b'], strategy='mean')
        return imputer.fit(sf), sf
/netket-3.9.2.tar.gz/netket-3.9.2/netket/graph/lattice.py
from dataclasses import dataclass from math import pi from netket.utils.types import Array from typing import Callable, Dict, Sequence, Tuple, Union, Optional, TYPE_CHECKING import warnings import numpy as _np from netket.utils.deprecation import deprecated as _deprecated from netket.utils import HashableArray from netket.utils.float import comparable, comparable_periodic, is_approx_int from netket.utils.group import PointGroup, PermutationGroup, trivial_point_group from .graph import Graph from ._lattice_edge_logic import ( get_nn_edges, get_custom_edges, create_site_positions, CustomEdgeT, ) if TYPE_CHECKING: from .space_group import SpaceGroupBuilder PositionT = _np.ndarray CoordT = _np.ndarray class InvalidSiteError(Exception): pass class InvalidWaveVectorError(Exception): pass @dataclass class LatticeSite: """ Contains information about a single :class:`~netket.graph.Lattice` site. """ id: int """Integer ID of this site""" position: PositionT """Real-space position of this site""" basis_coord: CoordT """basis coordinates of this site""" def __repr__(self): s = ", ".join(map(str, (self.id, self.basis_coord))) return f"LatticeSite({s})" def _create_sites(basis_vectors, extent, site_offsets): basis_coords, positions = create_site_positions(basis_vectors, extent, site_offsets) sites = [ LatticeSite(id=idx, position=pos, basis_coord=coord) for idx, (coord, pos) in enumerate(zip(basis_coords, positions)) ] return sites, basis_coords, positions def deprecated(alternative): def wrapper(fn): msg = ( f"{fn.__name__} is deprecated and may be removed in the future. " f"You can use `{alternative}`` instead." ) f = _deprecated(msg)(fn) return f return wrapper REPR_TEMPLATE = """Lattice( n_nodes={}, extent={}, basis_vectors= {}, site_offsets= {}, ) """ class Lattice(Graph): r""" A lattice built by periodic arrangement of a given unit cell. The lattice is represented as a Bravais lattice with (:code:`basis_vectors`) :math:`\{a_d\}_{d=1}^D` (where :math:`D = \mathtt{ndim}` is the dimension of the lattice) and a unit cell consisting of one or more sites, The positions of those sites within the unit cell can be specified by the :code:`site_offsets` parameter. The :code:`extent` is a array where :code:`extent[d]` specifies the number of times each unit cell is translated along direction :math:`d`. The full lattice is then generated by placing a site at each of the points .. math:: R_{rq} = \sum_{d=1}^D r_d a_d + b_q \in \mathbb R^D where :math:`r_d \in \{1, \ldots, \mathtt{extent}[d]\}` and :math:`b_q = \mathtt{site\_offsets}[q]`. We also refer to :math:`q` as the `label` of the site within the unit cell. The lattice class supports three ways of addressing a specific lattice site: id An integer index that is used to identify the site in :code:`self.edges()` and also corresponds to the index of the corresponding site in sequences like :code:`self.nodes()`, :code:`self.positions` or :code:`self.basis_coords`. positions Real-space position vector :math:`R_{rq}` as defined above, which is available from :func:`~netket.graph.Lattice.positions` and can be resolved into an id via :func:`~netket.graph.Lattice.id_from_position`. basis coordinates where each site is specified by a vector :code:`[r1, ..., rD, q]` with :math:`r` being the integer vector of length :code:`ndim` specifying the cell position as multiples of the primitive vectors and the site label :math:`q` giving the number of the site within the unit cell. 
Basis coordinates are available from :func:`~netket.graph.Lattice.basis_coords` and can be resolved into an id via :func:`~netket.graph.Lattice.id_from_basis_coords`. """ # Initialization # ------------------------------------------------------------------------ def __init__( self, basis_vectors: _np.ndarray, extent: _np.ndarray, *, pbc: Union[bool, Sequence[bool]] = True, site_offsets: Optional[_np.ndarray] = None, atoms_coord: Optional[_np.ndarray] = None, distance_atol: float = 1e-5, point_group: Optional[PointGroup] = None, max_neighbor_order: Optional[int] = None, custom_edges: Optional[Sequence[CustomEdgeT]] = None, ): """ Constructs a new ``Lattice`` given its side length and the features of the unit cell. Args: basis_vectors: The basis vectors of the lattice. Should be an array of shape `(ndim, ndim)` where each `row` is a basis vector. extent: The number of copies of the unit cell; needs to be an array of length `ndim`. pbc: If ``True`` then the constructed lattice will have periodic boundary conditions, otherwise open boundary conditions are imposed. Can also be an boolean sequence of length `ndim`, indicating either open or closed boundary conditions separately for each direction. site_offsets: The position offsets of sites in the unit cell (one site at the origin by default). distance_atol: Distance below which spatial points are considered equal for the purpose of identifying nearest neighbors. point_group: Default `PointGroup` object for constructing space groups max_neighbor_order: For :code:`max_neighbor_order == k`, edges between up to :math:`k`-nearest neighbor sites (measured by their Euclidean distance) are included in the graph. The edges can be distinguished by their color, which is set to :math:`k - 1` (so nearest-neighbor edges have color 0). By default, nearest neighbours (:code:`max_neighbor_order=1`) are autogenerated unless :code:`custom_edges` is passed. custom_edges: (Optional) Lists all edges starting in one unit cell, which are repeated in every unit cell of the constructed lattice. Should be a list of tuples; each tuple should contain the following: * index of the starting point in the unit cell * index of the endpoint in the unit cell * vector pointing from the former to the latter * color of the edge (optional) If colors are not supplied, they are assigned sequentially starting from 0. Cannot be used together with `max_neighbor_order`. Examples: Constructs a Kagome lattice with 3 × 3 unit cells: >>> import numpy as np >>> from netket.graph import Lattice >>> # Hexagonal lattice basis >>> sqrt3 = np.sqrt(3.0) >>> basis = np.array([ ... [1.0, 0.0], ... [0.5, sqrt3 / 2.0], ... ]) >>> # Kagome unit cell >>> cell = np.array([ ... basis[0] / 2.0, ... basis[1] / 2.0, ... (basis[0]+basis[1])/2.0 ... ]) >>> g = Lattice(basis_vectors=basis, site_offsets=cell, extent=[3, 3]) >>> print(g.n_nodes) 27 >>> print(g.basis_coords[:6]) [[0 0 0] [0 0 1] [0 0 2] [0 1 0] [0 1 1] [0 1 2]] >>> print(g.positions[:6]) [[0.5 0. ] [0.25 0.4330127 ] [0.75 0.4330127 ] [1. 0.8660254 ] [0.75 1.29903811] [1.25 1.29903811]] Constructs a rectangular lattice with distinct horizontal and vertical edges: >>> import numpy as np >>> from netket.graph import Lattice >>> basis = np.array([ ... [1.0,0.0], ... [0.0,0.5], ... ]) >>> custom_edges = [ ... (0, 0, [1.0,0.0], 0), ... (0, 0, [0.0,0.5], 1), ... ] >>> g = Lattice(basis_vectors=basis, pbc=False, extent=[4,6], ... 
custom_edges=custom_edges) >>> print(g.n_nodes) 24 >>> print(len(g.edges(filter_color=0))) 18 >>> print(len(g.edges(filter_color=1))) 20 """ # Clean input parameters self._basis_vectors = self._clean_basis(basis_vectors) self._ndim = self._basis_vectors.shape[1] self._site_offsets, site_pos_fractional = self._clean_site_offsets( site_offsets, atoms_coord, self._basis_vectors, ) self._pbc = self._clean_pbc(pbc, self._ndim) self._extent = _np.asarray(extent, dtype=int) self._lattice_dims = _np.expand_dims(self._extent, 1) * self.basis_vectors self._inv_dims = _np.linalg.inv(self._lattice_dims) self._point_group = point_group # Generate sites self._sites, self._basis_coords, self._positions = _create_sites( self._basis_vectors, self._extent, self._site_offsets, ) self._basis_coord_to_site = { HashableArray(p.basis_coord): p.id for p in self._sites } int_positions = self._to_integer_position(self._positions) self._int_position_to_site = { HashableArray(pos): index for index, pos in enumerate(int_positions) } # Generate edges if custom_edges is not None: if max_neighbor_order is not None: raise ValueError( "custom_edges and max_neighbor_order cannot be specified at the same time" ) colored_edges = get_custom_edges( self._basis_vectors, self._extent, self._site_offsets, self._pbc, distance_atol, custom_edges, ) else: if max_neighbor_order is None: max_neighbor_order = 1 colored_edges = get_nn_edges( self._basis_vectors, self._extent, self._site_offsets, self._pbc, distance_atol, max_neighbor_order, ) super().__init__(colored_edges, len(self._sites)) @staticmethod def _clean_basis(basis_vectors): """Check and convert `basis_vectors` init argument.""" basis_vectors = _np.asarray(basis_vectors) if basis_vectors.ndim != 2: raise ValueError( "'basis_vectors' must have ndim==2 (as array of primitive vectors)" ) if basis_vectors.shape[0] != basis_vectors.shape[1]: raise ValueError("The number of primitive vectors must match their length") return basis_vectors # TODO: remove atoms_coord argument. @staticmethod def _clean_site_offsets(site_offsets, atoms_coord, basis_vectors): """Check and convert `site_offsets` init argument.""" if atoms_coord is not None and site_offsets is not None: raise ValueError( "atoms_coord is deprecated and replaced by site_offsets, " "so both cannot be specified at the same time." ) if atoms_coord is not None: warnings.warn( "atoms_coord is deprecated and may be removed in future versions, " "please use site_offsets instead", FutureWarning, stacklevel=3, ) site_offsets = atoms_coord if site_offsets is None: site_offsets = _np.zeros(basis_vectors.shape[0])[None, :] site_offsets = _np.asarray(site_offsets) fractional_coords = site_offsets @ _np.linalg.inv(basis_vectors) fractional_coords_int = comparable_periodic(fractional_coords) # Check for duplicates (also across unit cells) uniques, idx = _np.unique(fractional_coords_int, axis=0, return_index=True) if len(site_offsets) != len(uniques): site_offsets = site_offsets[idx] fractional_coords = fractional_coords[idx] fractional_coords_int = fractional_coords_int[idx] warnings.warn( "Some atom positions are not unique. Duplicates were dropped, and " f"now atom positions are {site_offsets}", UserWarning, ) # Check if any site is outside primitive cell (may cause KDTree to malfunction) if _np.any(fractional_coords_int < comparable(0.0)) or _np.any( fractional_coords_int > comparable(1.0) ): warnings.warn( "Some sites were specified outside the primitive unit cell. 
This may" "cause errors in automatic edge finding.", UserWarning, ) return site_offsets, fractional_coords @staticmethod def _clean_pbc(pbc, ndim): """Check and convert `pbc` init argument.""" if isinstance(pbc, bool): return _np.array([pbc] * ndim, dtype=bool) elif ( not isinstance(pbc, Sequence) or len(pbc) != ndim or not all(isinstance(b, bool) for b in pbc) ): raise ValueError( "pbc must be either a boolean or a sequence of booleans with length" "equal to the lattice dimension" ) else: return _np.asarray(pbc, dtype=bool) # Properties # ------------------------------------------------------------------------ @property def basis_vectors(self): """Basis vectors of the lattice""" return self._basis_vectors @property def site_offsets(self): """Position offsets of sites in the unit cell""" return self._site_offsets @property def ndim(self): """Dimension of the lattice""" return self._ndim @property def pbc(self): """ Array of bools such that `pbc[d]` indicates whether dimension d has periodic boundaries. """ return self._pbc @property def extent(self): """ Extent of the lattice """ return self._extent @property def sites(self) -> Sequence[LatticeSite]: """Sequence of lattice site objects""" return self._sites @property def positions(self) -> PositionT: """Real-space positions of all lattice sites""" return self._positions @property def basis_coords(self) -> CoordT: """basis coordinates of all lattice sites""" return self._basis_coords # Site lookup # ------------------------------------------------------------------------ def _to_integer_position(self, positions: PositionT) -> Array: frac_positions = _np.matmul(positions, self._inv_dims) return comparable_periodic(frac_positions, self.pbc) @staticmethod def _get_id_from_dict( dict: Dict[HashableArray, int], key: Array ) -> Union[int, Array]: try: if key.ndim == 1: return dict[HashableArray(key)] elif key.ndim == 2: return _np.array([dict[HashableArray(k)] for k in key]) else: raise ValueError("Input needs to be rank 1 or rank 2 array") except KeyError as e: raise InvalidSiteError( "Some coordinates do not correspond to a valid lattice site" ) from e def id_from_position(self, position: PositionT) -> Union[int, Array]: """ Returns the id for a site at the given position. When passed a rank-2 array where each row is a position, returns an array of the corresponding ids. Throws an `InvalidSiteError` if any of the positions do not correspond to a site. """ int_pos = self._to_integer_position(position) ids = self._get_id_from_dict(self._int_position_to_site, int_pos) return ids def id_from_basis_coords(self, basis_coords: CoordT) -> Union[int, Array]: """ Return the id for a site at the given basis coordinates. When passed a rank-2 array where each row is a coordinate vector, returns an array of the corresponding ids. Throws an `InvalidSiteError` if any of the coords do not correspond to a site. """ key = _np.asarray(basis_coords) return self._get_id_from_dict(self._basis_coord_to_site, key) def position_from_basis_coords(self, basis_coords: CoordT) -> PositionT: """ Return the position of the site with given basis coordinates. When passed a rank-2 array where each row is a coordinate vector, this method returns an array of the corresponding positions. Throws an `InvalidSiteError` if no site is found for any of the coordinates. """ ids = self.id_from_basis_coords(basis_coords) return self.positions[ids] def to_reciprocal_lattice(self, ks: Array) -> Array: """ Converts wave vectors from Cartesian axes to reciprocal lattice vectors. 
Arguments: ks: wave vectors in Cartesian axes. Multidimensional arrays are accepted, the Cartesian coordinates must form the last dimension. Returns: The same wave vectors in the reciprocal basis **of the simulation box.** Valid wave vector components in this basis are integers in (periodic BCs) or zero (in open BCs). Throws an `InvalidWaveVectorError` if any of the supplied wave vectors are not reciprocal lattice vectors of the simulation box. """ # Ensure that ks has at least 2 dimensions ks = _np.asarray(ks) if ks.ndim == 1: ks = ks[_np.newaxis, :] result = ks @ self._lattice_dims.T / (2 * pi) # Check that these are integers is_valid = is_approx_int(result) if not _np.all(is_valid): raise InvalidWaveVectorError( "Some wave vectors are not reciprocal lattice vectors of the simulation" "box spanned by\n" + "\n".join( [ str(self._lattice_dims[i]) + (" (PBC)" if self.pbc[i] else " (OBC)") for i in range(self.ndim) ] ) ) result = _np.asarray(_np.rint(result), dtype=int) # For axes with non-periodic BCs, the k-component must be 0 is_valid = _np.logical_or(self.pbc, result == 0) if not _np.all(is_valid): raise InvalidWaveVectorError( "Some wave vectors are inconsistent with open boundary conditions" ) return result # Generating space groups # ----------------------------------------------------------------------- def space_group_builder( self, point_group: Optional[PointGroup] = None ) -> "SpaceGroupBuilder": """ Returns a `SpaceGroupBuilder` object that represents the spatial symmetries of `self`. Arguments: point_group: a `PointGroup` object describing the point-group symmetries of `self`. Optional, if not supplied, the `PointGroup` object provided at construction is used. Returns: A `SpaceGroupBuilder` object that generates `PermutationGroup`s encoding the action of `point_group`, the translation group of `self`, and the space group obtained as their semidirect product as permutations of the sites of `self`. It also yields space group irreps for symmetrising wave functions. """ from .space_group import SpaceGroupBuilder if point_group is None: if isinstance(self._point_group, PointGroup): point_group = self._point_group elif isinstance(self._point_group, Callable): self._point_group = self._point_group() point_group = self._point_group else: raise RuntimeError( "space_group_builder() missing required argument 'point_group'\n" "(lattice has no default point group)" ) return SpaceGroupBuilder(self, point_group) def space_group(self, point_group: Optional[PointGroup] = None) -> PermutationGroup: """ Returns the space group generated by the translation symmetries of `self` and the elements of `point_group` as a `PermutationGroup` acting on the sites of `self`. If no `point_group` is specified, uses the point group provided upon construction. """ return self.space_group_builder(point_group).space_group def point_group(self, point_group: Optional[PointGroup] = None) -> PermutationGroup: """ Returns the action of `point_group` on the sites of `self` as a `PermutationGroup`. If no `point_group` is specified, uses the point group provided upon construction. """ return self.space_group_builder(point_group).point_group def rotation_group( self, point_group: Optional[PointGroup] = None ) -> PermutationGroup: """ Returns the action of rotations (i.e. symmetries with determinant +1) in `point_group` on the sites of `self` as a `PermutationGroup`. If no `point_group` is specified, uses the point group provided upon construction. 
""" return self.space_group_builder(point_group).rotation_group def translation_group( self, dim: Optional[Union[int, Sequence[int]]] = None ) -> PermutationGroup: """ Returns the group of lattice translations of `self` as a `PermutationGroup` acting on the sites of `self`. """ return self.space_group_builder( trivial_point_group(self.ndim) ).translation_group(dim) # Output and drawing # ------------------------------------------------------------------------ def __repr__(self) -> str: return REPR_TEMPLATE.format( self.n_nodes, self._extent, str(self.basis_vectors).replace("\n", "\n" + " " * 8), str(self.site_offsets).replace("\n", "\n" + " " * 8), ) def draw( self, ax=None, figsize: Optional[Tuple[Union[int, float]]] = None, node_color: str = "#1f78b4", node_size: int = 300, edge_color: str = "k", curvature: float = 0.2, font_size: int = 12, font_color: str = "k", ): """ Draws the ``Lattice`` graph Args: ax: Matplotlib axis object. figsize: (width, height) tuple of the generated figure. node_color: String with the colour of the nodes. node_size: Area of the nodes (as in matplotlib.pyplot.scatter). edge_color: String with the colour of the edges. curvature: A Bezier curve is fit, where the "height" of the curve is `curvature` times the "length" of the curvature. font_size: fontsize of the labels for each node. font_color: Colour of the font used to label nodes. Returns: Matplotlib axis object containing the graph's drawing. """ import matplotlib.pyplot as plt # pylint: disable=import-outside-toplevel # Check if lattice is 1D or 2D... or notnetketwarnings.py if self._ndim == 1: positions = _np.pad(self.positions, (0, 1), "constant") elif self._ndim == 2: positions = self.positions else: raise ValueError( "Make sure that the graph is 1D or 2D in order to be drawn. " f" Now it is {self._ndim}D" ) if ax is None: _, ax = plt.subplots(figsize=figsize) for edge in self.edges(): x1, y1 = positions[edge[0]] x2, y2 = positions[edge[1]] annotation = ax.annotate( "", xy=(x1, y1), xycoords="data", xytext=(x2, y2), textcoords="data", arrowprops=dict( arrowstyle="-", color=edge_color, shrinkA=0, shrinkB=0, patchA=None, patchB=None, connectionstyle=f"arc3,rad={curvature}", ), ) ax.scatter( *positions.T, s=node_size, c=node_color, marker="o", zorder=annotation.get_zorder() + 1, ) for node in self.nodes(): x1, y1 = positions[node] ax.text( x1, y1, str(node), horizontalalignment="center", verticalalignment="center", fontsize=font_size, color=font_color, zorder=annotation.get_zorder() + 1, ) ax.axis("equal") return ax # Backwards compatibility # ------------------------------------------------------------------------ @deprecated("basis_coords[site_id, -1]") def atom_label(self, site_id: int) -> int: """Deprecated. please use :code:`basis_coords[site_id, -1]` instead.""" return self.basis_coords[site_id, -1] @deprecated("basis_coords[site_id, :-1]") def site_to_vector(self, site_id: int) -> CoordT: """Deprecated. please use :code:`basis_coords[site_id, :-1]` instead.""" return self.basis_coords[site_id, :-1] @deprecated("positions[site_id]") def site_to_coord(self, site_id: int) -> PositionT: """Deprecated. please use :code:`positions[site_id]` instead.""" return self.positions[site_id] @deprecated("id_from_basis_coords([*vector, 0])") def vector_to_site(self, vector: CoordT) -> int: """Deprecated. 
please use :code:`id_from_basis_coords([*vector, 0])` instead.""" # Note: This only gives one site within the unit cell, so that # `vector_to_site(site_to_vector(i)) == i` is _not_ true in general, # which is consistent with the behavior of the v2 lattice. return self.id_from_basis_coords([*vector, 0]) @deprecated("position_from_basis_coords([*vector, label])") def vector_to_coord(self, vector: CoordT, label: int) -> PositionT: "Deprecated. please use :code:`position_from_basis_coords([*vector, label])`." return self.position_from_basis_coords([*vector, label]) @property @deprecated("positions") def coordinates(self) -> PositionT: """Deprecated. please use :code:`positions` instead.""" return self.positions @property @deprecated("site_offsets") def atoms_coord(self) -> PositionT: """Deprecated. please use :code:`site_offsets` instead.""" return self._site_offsets
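
# --- Usage sketch (editor's addition, not part of the upstream file) ---
# A hedged example of the lookup and reciprocal-lattice helpers above. It
# assumes this class is the one exported as `netket.graph.Lattice` and that
# the constructor accepts `basis_vectors`, `extent` and `pbc` keyword
# arguments, as in NetKet's public API.
if __name__ == "__main__":
    import numpy as np
    from netket.graph import Lattice

    square = Lattice(basis_vectors=np.eye(2), extent=(3, 3), pbc=True)
    # Site id of cell (0, 0), sublattice 0, and its real-space position.
    site = square.id_from_basis_coords([0, 0, 0])
    print(site, square.positions[site])
    # 2*pi times a unit vector is a reciprocal lattice vector of the 3x3 box.
    print(square.to_reciprocal_lattice(2 * np.pi * np.eye(2)))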
PypiClean
/BIT_Online_Code_Helper-1.0.4-py3-none-any.whl/bit-online-code-helper/bitonline/LocalTestCodeManager.py
import subprocess
import os

from log.LogManager import *
from tqdm import tqdm


class _LocalTestCodeManager:
    def __remove_existed_exe(self):
        # Remove a stale executable left over from a previous compilation.
        if os.path.exists('a.exe'):
            os.remove('a.exe')

    def __compile_source_file(self, source_file_path):
        # Compile the C++ source with g++; returns True on success.
        self.__remove_existed_exe()
        if not os.path.exists(source_file_path):
            tip(LocalTestCodeLogs.SOURCE_FILE_NOT_FOUND)
            return False

        tip(LocalTestCodeLogs.COMPILE_PENDING)
        compile_process = subprocess.Popen(['g++', source_file_path, '-std=c++11'],
                                           shell=True,
                                           stdin=subprocess.PIPE,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
        out, err = compile_process.communicate()
        compile_process.terminate()

        # g++ writes diagnostics to stderr; any output there means failure.
        if len(err.decode()) != 0:
            tip(LocalTestCodeLogs.COMPILE_FAILED)
            return False
        else:
            tip(LocalTestCodeLogs.COMPILE_SUCCESS)
            return True

    def __local_test_code(self, problem_info):
        # Feed each test case to the compiled program and compare its stdout
        # with the expected result.
        is_pass = True
        tip(LocalTestCodeLogs.LOCAL_TEST_BEGIN)
        index = 1
        test_failed_case_index = []
        pbar = tqdm(total=len(problem_info.test_cases_and_results), unit='cases')
        for test_case in problem_info.test_cases_and_results:
            test_process = subprocess.Popen(['a.exe'],
                                            shell=True,
                                            stdin=subprocess.PIPE,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE)
            out, err = test_process.communicate(test_case['test_case'].encode())
            is_case_pass = out.decode() == test_case['result']
            if not is_case_pass:
                test_failed_case_index.append(index)
                is_pass = False
            index += 1
            pbar.update(1)
            test_process.terminate()
        pbar.close()

        if is_pass:
            tip(LocalTestCodeLogs.TEST_SUCCESS)
        else:
            tip(LocalTestCodeLogs.TEST_FAILURE_CASE)
            for index in test_failed_case_index:
                print(index, end=' ')
            print('')
        divide_line()
        return is_pass

    def run(self, source_file_path, problem_info):
        print(problem_info.title + '\n')
        if self.__compile_source_file(source_file_path):
            return self.__local_test_code(problem_info)
        return False


local_test_code_manager = _LocalTestCodeManager()
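
# --- Usage sketch (editor's addition, not part of the upstream file) ---
# `problem_info` is assumed to expose `title` and `test_cases_and_results`
# exactly as consumed above; the stand-in class below is hypothetical and
# only illustrates the expected shape of that object.
if __name__ == '__main__':
    class _DemoProblem:
        title = 'A + B'
        test_cases_and_results = [{'test_case': '1 2\n', 'result': '3\n'}]

    local_test_code_manager.run('main.cpp', _DemoProblem())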
PypiClean
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/address/fr_FR/__init__.py
from typing import Tuple

from .. import Provider as AddressProvider


class Provider(AddressProvider):
    city_suffixes = (
        "Ville", "Bourg", "-les-Bains", "-sur-Mer", "-la-Forêt", "boeuf", "nec", "dan",
    )
    city_prefixes = ("Saint", "Sainte")
    street_prefixes = ("rue", "rue", "chemin", "avenue", "boulevard")
    city_formats = (
        "{{city_prefix}} {{first_name}}",
        "{{city_prefix}} {{first_name}}{{city_suffix}}",
        "{{last_name}}",
        "{{last_name}}",
        "{{last_name}}",
        "{{last_name}}",
        "{{last_name}}{{city_suffix}}",
        "{{last_name}}{{city_suffix}}",
        "{{last_name}}{{city_suffix}}",
        "{{last_name}}-sur-{{last_name}}",
    )
    street_name_formats = (
        "{{street_prefix}} {{last_name}}",
        "{{street_prefix}} {{first_name}} {{last_name}}",
        "{{street_prefix}} de {{last_name}}",
    )
    street_address_formats = (
        "{{street_name}}",
        "{{building_number}}, {{street_name}}",
        "{{building_number}}, {{street_name}}",
        "{{building_number}}, {{street_name}}",
        "{{building_number}}, {{street_name}}",
        "{{building_number}}, {{street_name}}",
    )
    address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
    building_number_formats = ("%", "%#", "%#", "%#", "%##")

    countries = (
        "Afghanistan", "Afrique du sud", "Albanie", "Algérie", "Allemagne", "Andorre",
        "Angola", "Anguilla", "Antarctique", "Antigua et Barbuda",
        "Antilles néerlandaises", "Arabie saoudite", "Argentine", "Arménie", "Aruba",
        "Australie", "Autriche", "Azerbaïdjan", "Bahamas", "Bahrain", "Bangladesh",
        "Belgique", "Belize", "Benin", "Bermudes (Les)", "Bhoutan", "Biélorussie",
        "Bolivie", "Bosnie-Herzégovine", "Botswana", "Bouvet (Îles)", "Brunei",
        "Brésil", "Bulgarie", "Burkina Faso", "Burundi", "Cambodge", "Cameroun",
        "Canada", "Cap Vert", "Cayman (Îles)", "Chili", "Chine (Rép. pop.)",
        "Christmas (Île)", "Chypre", "Cocos (Îles)", "Colombie", "Comores",
        "Cook (Îles)", "Corée du Nord", "Corée, Sud", "Costa Rica", "Croatie", "Cuba",
        "Côte d'Ivoire", "Danemark", "Djibouti", "Dominique", "Égypte", "El Salvador",
        "Émirats arabes unis", "Équateur", "Érythrée", "Espagne", "Estonie",
        "États-Unis", "Ethiopie", "Falkland (Île)", "Fidji (République des)",
        "Finlande", "France", "Féroé (Îles)", "Gabon", "Gambie", "Ghana", "Gibraltar",
        "Grenade", "Groenland", "Grèce", "Guadeloupe", "Guam", "Guatemala", "Guinée",
        "Guinée Equatoriale", "Guinée-Bissau", "Guyane", "Guyane française",
        "Géorgie", "Géorgie du Sud et Sandwich du Sud (Îles)", "Haïti",
        "Heard et McDonald (Îles)", "Honduras", "Hong Kong", "Hongrie",
        "Îles Mineures Éloignées des États-Unis", "Inde", "Indonésie", "Irak", "Iran",
        "Irlande", "Islande", "Israël", "Italie", "Jamaïque", "Japon", "Jordanie",
        "Kazakhstan", "Kenya", "Kirghizistan", "Kiribati", "Koweit", "La Barbad",
        "Laos", "Lesotho", "Lettonie", "Liban", "Libye", "Libéria", "Liechtenstein",
        "Lithuanie", "Luxembourg", "Macau", "Macédoine du Nord", "Madagascar",
        "Malaisie", "Malawi", "Maldives (Îles)", "Mali", "Malte",
        "Mariannes du Nord (Îles)", "Maroc", "Marshall (Îles)", "Martinique",
        "Maurice", "Mauritanie", "Mayotte", "Mexique",
        "Micronésie (États fédérés de)", "Moldavie", "Monaco", "Mongolie",
        "Montserrat", "Mozambique", "Myanmar", "Namibie", "Nauru", "Nepal",
        "Nicaragua", "Niger", "Nigeria", "Niue", "Norfolk (Îles)", "Norvège",
        "Nouvelle Calédonie", "Nouvelle-Zélande", "Oman", "Ouganda", "Ouzbékistan",
        "Pakistan", "Palau", "Panama", "Papouasie-Nouvelle-Guinée", "Paraguay",
        "Pays-Bas", "Philippines", "Pitcairn (Îles)", "Pologne",
        "Polynésie française", "Porto Rico", "Portugal", "Pérou", "Qatar",
        "Roumanie", "Royaume-Uni", "Russie", "Rwanda", "Rép. Dém. du Congo",
        "République centrafricaine", "République Dominicaine", "République tchèque",
        "Réunion (La)", "Sahara Occidental", "Saint Pierre et Miquelon",
        "Saint Vincent et les Grenadines", "Saint-Kitts et Nevis",
        "Saint-Marin (Rép. de)", "Sainte Hélène", "Sainte Lucie", "Samoa", "Samoa",
        "Seychelles", "Sierra Leone", "Singapour", "Slovaquie", "Slovénie",
        "Somalie", "Soudan", "Sri Lanka", "Suisse", "Suriname", "Suède",
        "Svalbard et Jan Mayen (Îles)", "Swaziland", "Syrie",
        "São Tomé et Príncipe (Rép.)", "Sénégal", "Tadjikistan", "Taiwan",
        "Tanzanie", "Tchad", "Territoire britannique de l'océan Indien",
        "Territoires français du sud", "Thailande", "Timor", "Togo", "Tokelau",
        "Tonga", "Trinité et Tobago", "Tunisie", "Turkménistan",
        "Turks et Caïques (Îles)", "Turquie", "Tuvalu", "Ukraine", "Uruguay",
        "Vanuatu", "Vatican (Etat du)", "Venezuela", "Vierges (Îles)",
        "Vierges britanniques (Îles)", "Vietnam", "Wallis et Futuna (Îles)", "Yemen",
        "Yougoslavie", "Zambie", "Zaïre", "Zimbabwe",
    )

    regions = (
        "Alsace", "Aquitaine", "Auvergne", "Bourgogne", "Bretagne", "Centre",
        "Champagne-Ardenne", "Corse", "Franche-Comté", "Île-de-France",
        "Languedoc-Roussillon", "Limousin", "Lorraine", "Midi-Pyrénées",
        "Nord-Pas-de-Calais", "Basse-Normandie", "Haute-Normandie", "Pays-de-Loire",
        "Picardie", "Poitou-Charentes", "Province-Alpes-Côte d'Azur", "Rhone-Alpes",
        "Guadeloupe", "Martinique", "Guyane", "Réunion", "Saint-Pierre-et-Miquelon",
        "Mayotte", "Saint-Barthélémy", "Saint-Martin", "Wallis-et-Futuna",
        "Polynésie française", "Nouvelle-Calédonie",
    )

    departments = (
        ("01", "Ain"), ("02", "Aisne"), ("03", "Allier"),
        ("04", "Alpes-de-Haute-Provence"), ("05", "Hautes-Alpes"),
        ("06", "Alpes-Maritimes"), ("07", "Ardèche"), ("08", "Ardennes"),
        ("09", "Ariège"), ("10", "Aube"), ("11", "Aude"), ("12", "Aveyron"),
        ("13", "Bouches-du-Rhône"), ("14", "Calvados"), ("15", "Cantal"),
        ("16", "Charente"), ("17", "Charente-Maritime"), ("18", "Cher"),
        ("19", "Corrèze"), ("2A", "Corse-du-Sud"), ("2B", "Haute-Corse"),
        ("21", "Côte-d'Or"), ("22", "Côtes-d'Armor"), ("23", "Creuse"),
        ("24", "Dordogne"), ("25", "Doubs"), ("26", "Drôme"), ("27", "Eure"),
        ("28", "Eure-et-Loir"), ("29", "Finistère"), ("30", "Gard"),
        ("31", "Haute-Garonne"), ("32", "Gers"), ("33", "Gironde"),
        ("34", "Hérault"), ("35", "Ille-et-Vilaine"), ("36", "Indre"),
        ("37", "Indre-et-Loire"), ("38", "Isère"), ("39", "Jura"),
        ("40", "Landes"), ("41", "Loir-et-Cher"), ("42", "Loire"),
        ("43", "Haute-Loire"), ("44", "Loire-Atlantique"), ("45", "Loiret"),
        ("46", "Lot"), ("47", "Lot-et-Garonne"), ("48", "Lozère"),
        ("49", "Maine-et-Loire"), ("50", "Manche"), ("51", "Marne"),
        ("52", "Haute-Marne"), ("53", "Mayenne"), ("54", "Meurthe-et-Moselle"),
        ("55", "Meuse"), ("56", "Morbihan"), ("57", "Moselle"), ("58", "Nièvre"),
        ("59", "Nord"), ("60", "Oise"), ("61", "Orne"), ("62", "Pas-de-Calais"),
        ("63", "Puy-de-Dôme"), ("64", "Pyrénées-Atlantiques"),
        ("65", "Hautes-Pyrénées"), ("66", "Pyrénées-Orientales"),
        ("67", "Bas-Rhin"), ("68", "Haut-Rhin"), ("69", "Rhône"),
        ("70", "Haute-Saône"), ("71", "Saône-et-Loire"), ("72", "Sarthe"),
        ("73", "Savoie"), ("74", "Haute-Savoie"), ("75", "Paris"),
        ("76", "Seine-Maritime"), ("77", "Seine-et-Marne"), ("78", "Yvelines"),
        ("79", "Deux-Sèvres"), ("80", "Somme"), ("81", "Tarn"),
        ("82", "Tarn-et-Garonne"), ("83", "Var"), ("84", "Vaucluse"),
        ("85", "Vendée"), ("86", "Vienne"), ("87", "Haute-Vienne"),
        ("88", "Vosges"), ("89", "Yonne"), ("90", "Territoire de Belfort"),
        ("91", "Essonne"), ("92", "Hauts-de-Seine"), ("93", "Seine-Saint-Denis"),
        ("94", "Val-de-Marne"), ("95", "Val-d'Oise"), ("971", "Guadeloupe"),
        ("972", "Martinique"), ("973", "Guyane"), ("974", "La Réunion"),
        ("976", "Mayotte"),
    )

    def street_prefix(self) -> str:
        """
        :example: 'rue'
        """
        return self.random_element(self.street_prefixes)

    def city_prefix(self) -> str:
        """
        :example: 'Saint'
        """
        return self.random_element(self.city_prefixes)

    def administrative_unit(self) -> str:
        """
        :example: 'Guadeloupe'
        """
        return self.random_element(self.regions)

    region = administrative_unit

    def department(self) -> Tuple[str, str]:
        """
        Randomly returns a French department ('departmentNumber', 'departmentName').

        :example: ('2B', 'Haute-Corse')
        """
        return self.random_element(self.departments)

    def department_name(self) -> str:
        """
        Randomly returns a French department name.

        :example: 'Ardèche'
        """
        return self.department()[1]

    def department_number(self) -> str:
        """
        Randomly returns a French department number.

        :example: '59'
        """
        return self.department()[0]

    def postcode(self) -> str:
        """
        Randomly returns a postcode generated from an existing French department number.

        :example: '33260'
        """
        department = self.department_number()
        if department in ["2A", "2B"]:
            department = "20"
        return f"{department}{self.random_number(digits=5 - len(department), fix_len=True)}"
PypiClean
/MedPy-0.4.0.tar.gz/MedPy-0.4.0/bin/medpy_graphcut_voxel.py
# built-in modules
from argparse import RawTextHelpFormatter
import argparse
import logging
import os

# third-party modules
import scipy

# path changes

# own modules
from medpy.core import ArgumentError, Logger
from medpy.io import load, save, header
from medpy import graphcut
from medpy.graphcut.wrapper import split_marker

# information
__author__ = "Oskar Maier"
__version__ = "r0.3.1, 2012-03-23"
__email__ = "[email protected]"
__status__ = "Release"
__description__ = """
Perform a binary graph cut using Boykov's max-flow/min-cut algorithm.

This implementation only computes a boundary term and does not use any
regional term. The desired boundary term can be selected via the
--boundary argument. Depending on the selected term, an additional image
has to be supplied as badditional. In the case of the difference of
means, it is the original image.

Furthermore the algorithm requires a binary image with foreground
markers and a binary image with background markers. Additionally a
filename for the created binary mask marking foreground and background
has to be supplied.

Note that the input images must be of the same dimensionality, otherwise
an exception is thrown. Note to take the input images' orientation into
account. Note that the quality of the resulting segmentations depends
also on the quality of the supplied markers.

Copyright (C) 2013 Oskar Maier
This program comes with ABSOLUTELY NO WARRANTY; This is free software,
and you are welcome to redistribute it under certain conditions; see
the LICENSE file or <http://www.gnu.org/licenses/> for details.
"""


# code
def main():
    # parse cmd arguments
    parser = getParser()
    parser.parse_args()
    args = getArguments(parser)

    # prepare logger
    logger = Logger.getInstance()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.verbose:
        logger.setLevel(logging.INFO)

    # check if output image exists
    if not args.force:
        if os.path.exists(args.output):
            logger.warning('The output image {} already exists. Exiting.'.format(args.output))
            exit(-1)

    # select boundary term from ['diff_linear', 'diff_exp', 'diff_div',
    # 'diff_pow', 'max_linear', 'max_exp', 'max_div', 'max_pow']
    if 'diff_linear' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_linear
        logger.info('Selected boundary term: linear difference of intensities')
    elif 'diff_exp' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_exponential
        logger.info('Selected boundary term: exponential difference of intensities')
    elif 'diff_div' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_division
        logger.info('Selected boundary term: divided difference of intensities')
    elif 'diff_pow' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_difference_power
        logger.info('Selected boundary term: power based / raised difference of intensities')
    elif 'max_linear' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_linear
        logger.info('Selected boundary term: linear maximum of intensities')
    elif 'max_exp' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_exponential
        logger.info('Selected boundary term: exponential maximum of intensities')
    elif 'max_div' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_division
        logger.info('Selected boundary term: divided maximum of intensities')
    elif 'max_pow' == args.boundary:
        boundary_term = graphcut.energy_voxel.boundary_maximum_power
        logger.info('Selected boundary term: power based / raised maximum of intensities')

    # load input images
    badditional_image_data, reference_header = load(args.badditional)
    markers_image_data, _ = load(args.markers)

    # split marker image into fg and bg images
    fgmarkers_image_data, bgmarkers_image_data = split_marker(markers_image_data)

    # check if all images dimensions are the same
    if not (badditional_image_data.shape == fgmarkers_image_data.shape == bgmarkers_image_data.shape):
        logger.critical('Not all of the supplied images are of the same shape.')
        raise ArgumentError('Not all of the supplied images are of the same shape.')

    # extract spacing if required
    if args.spacing:
        spacing = header.get_pixel_spacing(reference_header)
        logger.info('Taking spacing of {} into account.'.format(spacing))
    else:
        spacing = False

    # generate graph
    logger.info('Preparing BK_MFMC C++ graph...')
    gcgraph = graphcut.graph_from_voxels(fgmarkers_image_data,
                                         bgmarkers_image_data,
                                         boundary_term=boundary_term,
                                         boundary_term_args=(badditional_image_data, args.sigma, spacing))

    # execute min-cut
    logger.info('Executing min-cut...')
    maxflow = gcgraph.maxflow()
    logger.debug('Maxflow is {}'.format(maxflow))

    # reshape results to form a valid mask
    logger.info('Applying results...')
    result_image_data = scipy.zeros(bgmarkers_image_data.size, dtype=scipy.bool_)
    for idx in range(len(result_image_data)):
        result_image_data[idx] = 0 if gcgraph.termtype.SINK == gcgraph.what_segment(idx) else 1
    result_image_data = result_image_data.reshape(bgmarkers_image_data.shape)

    # save resulting mask
    save(result_image_data.astype(scipy.bool_), args.output, reference_header, args.force)

    logger.info('Successfully terminated.')


def getArguments(parser):
    "Provides additional validation of the arguments collected by argparse."
    return parser.parse_args()


def getParser():
    "Creates and returns the argparse parser object."
    parser = argparse.ArgumentParser(description=__description__,
                                     formatter_class=RawTextHelpFormatter)
    parser.add_argument('sigma', type=float,
                        help='The sigma required for the boundary terms.')
    parser.add_argument('badditional',
                        help='The additional image required by the boundary term. See there for details.')
    parser.add_argument('markers',
                        help='Image containing the foreground (=1) and background (=2) markers.')
    parser.add_argument('output',
                        help='The output image containing the segmentation.')
    parser.add_argument('--boundary', default='diff_exp',
                        help='The boundary term to use. Note that the ones prefixed with diff_ require the original image, while the ones prefixed with max_ require the gradient image.',
                        choices=['diff_linear', 'diff_exp', 'diff_div', 'diff_pow',
                                 'max_linear', 'max_exp', 'max_div', 'max_pow'])
    parser.add_argument('-s', dest='spacing', action='store_true',
                        help='Set this flag to take the pixel spacing of the image into account. The spacing data will be extracted from the badditional image.')
    parser.add_argument('-f', dest='force', action='store_true',
                        help='Set this flag to silently override files that exist.')
    parser.add_argument('-v', dest='verbose', action='store_true',
                        help='Display more information.')
    parser.add_argument('-d', dest='debug', action='store_true',
                        help='Display debug information.')
    return parser


if __name__ == "__main__":
    main()
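
# --- Usage sketch (editor's addition, not part of the upstream script) ---
# Example command line matching the parser defined above; the file names
# are hypothetical placeholders:
#
#     python medpy_graphcut_voxel.py 15.0 original.nii markers.nii result.nii \
#         --boundary=diff_exp -s -v
#
# Here `original.nii` is the intensity image required by the diff_* boundary
# terms, `markers.nii` holds the fore-/background markers (1/2), and
# `result.nii` receives the binary segmentation mask.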
PypiClean
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/trans/views/settings.py
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied, ValidationError
from django.http import FileResponse, Http404, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.utils.translation import gettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_POST
from django.views.generic import TemplateView, View

from weblate.lang.models import Language
from weblate.trans.forms import (
    AnnouncementForm,
    ComponentDeleteForm,
    ComponentMoveForm,
    ComponentRenameForm,
    ComponentSettingsForm,
    ProjectDeleteForm,
    ProjectLanguageDeleteForm,
    ProjectRenameForm,
    ProjectSettingsForm,
    TranslationDeleteForm,
)
from weblate.trans.models import Announcement, Change, Component
from weblate.trans.tasks import (
    component_removal,
    create_project_backup,
    project_removal,
)
from weblate.trans.util import redirect_param, render
from weblate.utils import messages
from weblate.utils.stats import ProjectLanguage
from weblate.utils.views import (
    get_component,
    get_project,
    get_translation,
    show_form_errors,
)


@never_cache
@login_required
def change_project(request, project):
    obj = get_project(request, project)

    if not request.user.has_perm("project.edit", obj):
        raise Http404()

    if request.method == "POST":
        settings_form = ProjectSettingsForm(request, request.POST, instance=obj)
        if settings_form.is_valid():
            settings_form.save()
            messages.success(request, _("Settings saved"))
            return redirect("settings", project=obj.slug)
        else:
            messages.error(
                request, _("Invalid settings, please check the form for errors!")
            )
    else:
        settings_form = ProjectSettingsForm(request, instance=obj)

    return render(
        request,
        "project-settings.html",
        {"object": obj, "form": settings_form},
    )


@never_cache
@login_required
def change_component(request, project, component):
    obj = get_component(request, project, component)

    if not request.user.has_perm("component.edit", obj):
        raise Http404()

    if request.method == "POST":
        form = ComponentSettingsForm(request, request.POST, instance=obj)
        if form.is_valid():
            form.save()
            messages.success(request, _("Settings saved"))
            return redirect("settings", project=obj.project.slug, component=obj.slug)
        else:
            messages.error(
                request, _("Invalid settings, please check the form for errors!")
            )
            # Get a fresh copy of object, otherwise it will use unsaved changes
            # from the failed form
            obj = Component.objects.get(pk=obj.pk)
    else:
        form = ComponentSettingsForm(request, instance=obj)

    if obj.repo_needs_merge():
        messages.warning(
            request,
            _(
                "The repository is outdated, you might not get "
                "expected results until you update it."
            ),
        )

    return render(
        request,
        "component-settings.html",
        {"project": obj.project, "object": obj, "form": form},
    )


@never_cache
@login_required
@require_POST
def dismiss_alert(request, project, component):
    obj = get_component(request, project, component)

    if not request.user.has_perm("component.edit", obj):
        raise Http404()

    try:
        alert = obj.alert_set.get(name=request.POST["dismiss"])
        if alert.obj.dismissable:
            alert.dismissed = True
            alert.save(update_fields=["dismissed"])
    except ObjectDoesNotExist:
        pass

    return redirect_param(obj, "#alerts")


@login_required
@require_POST
def remove_translation(request, project, component, lang):
    obj = get_translation(request, project, component, lang)

    if not request.user.has_perm("translation.delete", obj):
        raise PermissionDenied()

    form = TranslationDeleteForm(obj, request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#delete")

    obj.remove(request.user)
    messages.success(request, _("Translation has been removed."))

    return redirect(obj.component)


@login_required
@require_POST
def remove_component(request, project, component):
    obj = get_component(request, project, component)

    if not request.user.has_perm("component.edit", obj):
        raise PermissionDenied()

    form = ComponentDeleteForm(obj, request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#delete")

    component_removal.delay(obj.pk, request.user.pk)
    messages.success(request, _("Translation component was scheduled for removal."))

    return redirect(obj.project)


@login_required
@require_POST
def remove_project(request, project):
    obj = get_project(request, project)

    if not request.user.has_perm("project.edit", obj):
        raise PermissionDenied()

    form = ProjectDeleteForm(obj, request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#delete")

    project_removal.delay(obj.pk, request.user.pk)
    messages.success(request, _("Project was scheduled for removal."))
    return redirect("home")


@login_required
@require_POST
def remove_project_language(request, project, lang):
    project_object = get_project(request, project)
    language_object = get_object_or_404(Language, code=lang)
    obj = ProjectLanguage(project_object, language_object)

    if not request.user.has_perm("translation.delete", obj):
        raise PermissionDenied()

    form = ProjectLanguageDeleteForm(obj, request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#delete")

    for translation in obj.translation_set:
        translation.remove(request.user)

    messages.success(request, _("Language of the project was removed."))
    return redirect(project_object)


def perform_rename(form_cls, request, obj, perm: str):
    if not request.user.has_perm(perm, obj):
        raise PermissionDenied()

    # Make sure any non-rename related issues are resolved first
    try:
        obj.full_clean()
    except ValidationError as err:
        messages.error(
            request,
            _("Cannot rename due to outstanding issue in the configuration: %s") % err,
        )
        return redirect_param(obj, "#rename")

    form = form_cls(request, request.POST, instance=obj)
    if not form.is_valid():
        show_form_errors(request, form)
        # Reload the object from db to revert possible rejected change
        obj.refresh_from_db()
        return redirect_param(obj, "#rename")

    # Invalidate old stats
    obj.stats.invalidate()

    obj = form.save()

    # Invalidate new stats
    obj.stats.invalidate()

    return redirect(obj)


@login_required
@require_POST
def rename_component(request, project, component):
    obj = get_component(request, project, component)
    return perform_rename(ComponentRenameForm, request, obj, "component.edit")


@login_required
@require_POST
def move_component(request, project, component):
    obj = get_component(request, project, component)
    return perform_rename(ComponentMoveForm, request, obj, "project.edit")


@login_required
@require_POST
def rename_project(request, project):
    obj = get_project(request, project)
    return perform_rename(ProjectRenameForm, request, obj, "project.edit")


@login_required
@require_POST
def announcement_translation(request, project, component, lang):
    obj = get_translation(request, project, component, lang)

    if not request.user.has_perm("component.edit", obj):
        raise PermissionDenied()

    form = AnnouncementForm(request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#announcement")

    Announcement.objects.create(
        user=request.user,
        project=obj.component.project,
        component=obj.component,
        language=obj.language,
        **form.cleaned_data,
    )

    return redirect(obj)


@login_required
@require_POST
def announcement_component(request, project, component):
    obj = get_component(request, project, component)

    if not request.user.has_perm("component.edit", obj):
        raise PermissionDenied()

    form = AnnouncementForm(request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#announcement")

    Announcement.objects.create(
        user=request.user, project=obj.project, component=obj, **form.cleaned_data
    )

    return redirect(obj)


@login_required
@require_POST
def announcement_project(request, project):
    obj = get_project(request, project)

    if not request.user.has_perm("project.edit", obj):
        raise PermissionDenied()

    form = AnnouncementForm(request.POST)
    if not form.is_valid():
        show_form_errors(request, form)
        return redirect_param(obj, "#announcement")

    Announcement.objects.create(user=request.user, project=obj, **form.cleaned_data)

    return redirect(obj)


@login_required
@require_POST
def announcement_delete(request, pk):
    announcement = get_object_or_404(Announcement, pk=pk)

    if request.user.has_perm("announcement.delete", announcement):
        announcement.delete()

    return JsonResponse({"responseStatus": 200})


@login_required
def component_progress(request, project, component):
    obj = get_component(request, project, component)
    return_url = "component" if "info" in request.GET else "guide"
    if not obj.in_progress():
        return redirect(return_url, **obj.get_reverse_url_kwargs())

    progress, log = obj.get_progress()

    return render(
        request,
        "component-progress.html",
        {
            "object": obj,
            "progress": progress,
            "log": "\n".join(log),
            "return_url": return_url,
        },
    )


class BackupsMixin:
    @method_decorator(login_required)
    def setup(self, request, *args, **kwargs):
        super().setup(request, *args, **kwargs)
        self.obj = get_project(request, kwargs["project"])
        if not request.user.has_perm("project.edit", self.obj):
            raise PermissionDenied()


class BackupsView(BackupsMixin, TemplateView):
    template_name = "trans/backups.html"

    def post(self, request, *args, **kwargs):
        create_project_backup.delay(self.obj.pk)
        messages.success(
            request, _("Backup was triggered, it will be shortly available.")
        )
        return redirect("backups", project=self.obj.slug)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["keep_count"] = settings.PROJECT_BACKUP_KEEP_COUNT
        context["keep_days"] = settings.PROJECT_BACKUP_KEEP_DAYS
        context["object"] = self.obj
        context["backups"] = self.obj.list_backups()
        return context


class BackupsDownloadView(BackupsMixin, View):
    def get(self, request, *args, **kwargs):
        for backup in self.obj.list_backups():
            if backup["name"] == kwargs["backup"]:
                return FileResponse(
                    open(backup["path"], "rb"),
                    as_attachment=True,
                    filename=backup["name"],
                )
        raise Http404
PypiClean
/DocOnce-1.5.15-py3-none-any.whl/doconce/plaintext.py
from __future__ import absolute_import
from builtins import str
from builtins import range
from past.builtins import basestring
import sys
import regex as re
from .common import default_movie, plain_exercise, bibliography, \
     cite_with_multiple_args2multiple_cites, fix_ref_section_chapter
from .misc import option


def plain_author(authors_and_institutions, auth2index,
                 inst2index, index2inst, auth2email):
    text = '\n'
    for author in auth2index:
        email = auth2email[author]
        email_text = '' if email is None else '(%s)' % email
        text += ' '.join([author, str(auth2index[author]), email_text]) + '\n'
    text += '\n'
    for index in index2inst:
        text += '[%d] %s\n' % (index, index2inst[index])
    text += '\n'
    return text


def plain_ref_and_label(section_label2title, format, filestr):
    filestr = fix_ref_section_chapter(filestr, format)

    # remove label{...} from output (when only label{} on a line, remove
    # the newline too, leave label in figure captions, and remove all the rest)
    #filestr = re.sub(r'^label\{.+?\}\s*$', '', filestr, flags=re.MULTILINE)
    cpattern = re.compile(r'^label\{.+?\}\s*$', flags=re.MULTILINE)
    filestr = cpattern.sub('', filestr)
    #filestr = re.sub(r'^(FIGURE:.+)label\{(.+?)\}', '\g<1>{\g<2>}', filestr, flags=re.MULTILINE)
    cpattern = re.compile(r'^(FIGURE:.+)label\{(.+?)\}', flags=re.MULTILINE)
    filestr = cpattern.sub('\g<1>{\g<2>}', filestr)
    filestr = re.sub(r'label\{.+?\}', '', filestr)  # all the remaining

    # replace all references to sections:
    for label in section_label2title:
        filestr = filestr.replace('ref{%s}' % label,
                                  '"%s"' % section_label2title[label])

    from .common import ref2equations
    filestr = ref2equations(filestr)
    return filestr


def plain_index_bib(filestr, index, citations, pubfile, pubdata):
    if citations:
        filestr = cite_with_multiple_args2multiple_cites(filestr)
    for label in citations:
        filestr = filestr.replace('cite{%s}' % label, '[%d]' % citations[label])

    if pubfile is not None:
        bibtext = bibliography(pubdata, citations, format='doconce')
        bibtext = re.sub(r'label\{.+?\} ', '', bibtext)
        # Remove boldface _author_ (typically 12. _John Doe and Jane Doe_.)
        bibtext = re.sub(r'(\d+)\. _(.+)_\.', '\g<2>', bibtext)
        filestr = re.sub(r'^BIBFILE:.+$', bibtext, filestr, flags=re.MULTILINE)

    # remove all index entries:
    filestr = re.sub(r'idx\{.+?\}\n?', '', filestr)
    # no index since line numbers from the .do.txt (in index dict)
    # never correspond to the output format file
    #filestr += '\n\n======= Index =======\n\n'
    #for word in index:
    #    filestr += '%s, line %s\n' % (word, ', '.join(index[word]))

    return filestr


def plain_toc(sections, filestr):
    # Find minimum section level
    tp_min = 4
    for title, tp, label in sections:
        if tp < tp_min:
            tp_min = tp

    s = 'Table of contents:\n\n'
    for title, tp, label in sections:
        s += ' '*(2*(tp-tp_min)) + title + '\n'
    return s


def plain_box(text, title=''):
    """Wrap a box around the text, with a title on the upper box border."""
    lines = text.splitlines()
    maxlen = max([len(line) for line in lines])
    newlines = []
    # title can be :: since equations and code must be preceded by ::
    # and plaintext inserts a double colon
    if title == '' or title.lower() == 'none' or title == '::':
        newlines.append('|-' + '-'*maxlen + '-|')
    else:
        newlines.append(title + ' ' + '-'*(maxlen-len(title)) + '--|')
    for line in lines:
        newlines.append('| ' + line + ' '*(maxlen-len(line)) + ' |')
    newlines.append('|-' + '-'*maxlen + '-|')

    # Drop blank lines at the beginning
    drop = 0
    for line in newlines[1:]:
        if re.search(r'[^\-| ]', line):
            break
        else:
            drop += 1
    for i in range(drop):
        del newlines[1]
    if re.search(r'^\w', newlines[0]):
        # Insert a blank line
        newlines.insert(1, '| ' + ' '*maxlen + ' |')

    # Drop blank lines at the end
    drop = 0
    for line in reversed(newlines[:-1]):
        if re.search(r'[^\-| ]', line):
            break
        else:
            drop += 1
    for i in range(1, drop+1, 1):
        del newlines[-2]

    return '\n' + '\n'.join(newlines) + '\n'


def plain_quiz(quiz):
    # Simple typesetting of a quiz
    import string
    question_prefix = quiz.get('question prefix',
                               option('quiz_question_prefix=', 'Question:'))
    common_choice_prefix = option('quiz_choice_prefix=', 'Choice')
    quiz_expl = option('quiz_explanations=', 'on')

    text = '\n\n'
    if 'new page' in quiz:
        text += '======= %s =======\n\n' % (quiz['new page'])

    # Don't write Question: ... if inside an exercise section
    if quiz.get('embedding', 'None') in ['exercise',]:
        pass
    else:
        text += '\n'
        if question_prefix:
            text += '%s ' % (question_prefix)

    text += quiz['question'] + '\n\n'

    # List choices as paragraphs
    for i, choice in enumerate(quiz['choices']):
        #choice_no = i+1
        choice_no = string.ascii_uppercase[i]
        answer = choice[0].capitalize() + '!'
        choice_prefix = common_choice_prefix
        if 'choice prefix' in quiz:
            if isinstance(quiz['choice prefix'][i], basestring):
                choice_prefix = quiz['choice prefix'][i]
        if choice_prefix == '' or choice_prefix[-1] in ['.', ':', '?']:
            pass  # don't add choice number/letter
        else:
            choice_prefix += ' %s:' % choice_no

        # Let choice start with a newline if pure code starts the choice
        # (test for different code block types so this function can work
        # for other formats too...)
        choice = choice[1].lstrip()
        code_starters = 'Code::', '~~~', '```', '{{{'
        for code_starter in code_starters:
            if choice.startswith(code_starter):
                choice = '\n' + choice

        # Cannot treat explanations
        text += '%s %s\n\n' % (choice_prefix, choice)
    return text


def define(FILENAME_EXTENSION,
           BLANKLINE,
           INLINE_TAGS_SUBST,
           CODE,
           LIST,
           ARGLIST,
           TABLE,
           EXERCISE,
           FIGURE_EXT,
           CROSS_REFS,
           INDEX_BIB,
           TOC,
           ENVIRS,
           QUIZ,
           INTRO,
           OUTRO,
           filestr):
    # all arguments are dicts and accept in-place modifications (extensions)

    FILENAME_EXTENSION['plain'] = '.txt'
    BLANKLINE['plain'] = '\n'

    # replacement patterns for substitutions of inline tags
    encoding = 'utf-8'
    INLINE_TAGS_SUBST['plain'] = {
        'math': r'\g<begin>\g<subst>\g<end>',  # drop $ signs
        'math2': r'\g<begin>\g<puretext>\g<end>',
        'emphasize': None,
        'bold': None,
        'figure': None,
        'movie': default_movie,
        'verbatim': r'\g<begin>\g<subst>\g<end>',  # no ` chars
        #'linkURL': r'\g<begin>\g<link> (\g<url>)\g<end>',
        'linkURL2': r'\g<link> (\g<url>)',
        'linkURL3': r'\g<link> (\g<url>)',
        'linkURL2v': r'\g<link> (\g<url>)',
        'linkURL3v': r'\g<link> (\g<url>)',
        'plainURL': r'\g<url>',
        'colortext': '\g<text>',
        'title': r'======= \g<subst> =======\n',  # doconce top section, to be substituted later
        'author': plain_author,
        'date': r'\nDate: \g<subst>\n',
        'chapter': lambda m: '%s\n%s' % (m.group('subst'), '%'*len(m.group('subst'))),
        'section': lambda m: '%s\n%s' % (m.group('subst'), '='*len(m.group('subst'))),
        'subsection': lambda m: '%s\n%s' % (m.group('subst'), '-'*len(m.group('subst'))),
        'subsubsection': lambda m: '%s\n%s\n' % (m.group('subst'), '~'*len(m.group('subst'))),
        'paragraph': r'*\g<subst>*\g<space>',  # extra blank
        'abstract': r'\n*\g<type>.* \g<text>\g<rest>',
        'linebreak': r'\g<text>',
        'footnote': None,
        'non-breaking-space': ' ',
        'ampersand2': r' \g<1>&\g<2>',
    }

    from .rst import rst_code
    CODE['plain'] = rst_code

    from .common import DEFAULT_ARGLIST
    ARGLIST['plain'] = DEFAULT_ARGLIST

    LIST['plain'] = {
        'itemize': {'begin': '', 'item': '*', 'end': '\n'},
        'enumerate': {'begin': '', 'item': '%d.', 'end': '\n'},
        'description': {'begin': '', 'item': '%s', 'end': '\n'},
        'separator': '\n',
    }

    CROSS_REFS['plain'] = plain_ref_and_label

    from .rst import rst_table
    TABLE['plain'] = rst_table
    #TABLE['plain'] = plain_table
    EXERCISE['plain'] = plain_exercise
    INDEX_BIB['plain'] = plain_index_bib
    TOC['plain'] = plain_toc

    from .common import indent_lines
    ENVIRS['plain'] = {
        'warning': lambda block, format, title='Warning', text_size='normal':
            plain_box(block, title),
        'notice': lambda block, format, title='Notice', text_size='normal':
            plain_box(block, title),
        'question': lambda block, format, title='Question', text_size='normal':
            plain_box(block, title),
        'hint': lambda block, format, title='Hint', text_size='normal':
            plain_box(block, title),
        'summary': lambda block, format, title='Summary', text_size='normal':
            plain_box(block, title),
        'block': lambda block, format, title='Block', text_size='normal':
            plain_box(block, title),
        'box': lambda block, format, title='none', text_size='normal':
            plain_box(block, title),
        'quote': lambda block, format, title='none', text_size='normal':
            indent_lines(block, 'plain'),
    }
    QUIZ['plain'] = plain_quiz
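
# --- Usage sketch (editor's addition, not part of the upstream file) ---
# `plain_box` is self-contained, so it can be exercised directly; it prints
# the text wrapped in an ASCII box with the title on the top border.
if __name__ == '__main__':
    print(plain_box('Energy is conserved.\nSo is momentum.', title='Summary'))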
PypiClean
/CNFgen-0.9.2-py3-none-any.whl/cnfgen/families/cliquecoloring.py
from itertools import combinations

from cnfgen.formula.cnf import CNF
from cnfgen.localtypes import non_negative_int


def CliqueColoring(n, k, c, formula_class=CNF):
    r"""Clique-coloring CNF formula

    The formula claims that a graph :math:`G` with :math:`n` vertices
    simultaneously contains a clique of size :math:`k` and a coloring
    of size :math:`c`.

    If :math:`k = c + 1` then the formula is clearly unsatisfiable, and
    it is the only known example of a formula hard for the cutting
    planes proof system. [1]_

    Variables :math:`e_{u,v}` encode the edges of the graph.

    Variables :math:`q_{i,v}` encode a function from :math:`[k]` to
    :math:`[n]` that represents a clique.

    Variables :math:`r_{v,\ell}` encode a function from :math:`[n]` to
    :math:`[c]` that represents a coloring.

    Parameters
    ----------
    n : int
        number of vertices in the graph
    k : int
        size of the clique
    c : int
        size of the coloring

    Returns
    -------
    A CNF object

    References
    ----------
    .. [1] Pavel Pudlak.
           Lower bounds for resolution and cutting plane proofs and
           monotone computations.
           Journal of Symbolic Logic (1997)
    """
    non_negative_int(n, 'n')
    non_negative_int(k, 'k')
    non_negative_int(c, 'c')

    description = "There is a graph of {0} vertices with a {1}-clique and a {2}-coloring".format(
        n, k, c)
    F = formula_class(description=description)

    # Variables
    e = F.new_combinations(n, 2, label='e_{{{}}}')
    q = F.new_mapping(k, n, label='q_{{{0},{1}}}')
    r = F.new_mapping(n, c, label='r_{{{0},{1}}}')

    # some vertex is i'th member of clique
    F.force_complete_mapping(q)
    F.force_functional_mapping(q)
    F.force_injective_mapping(q)

    # clique members must be pairwise connected
    for u, v in e.indices():
        for i, j in combinations(q.domain(), 2):
            F.add_clause([e(u, v), -q(i, u), -q(j, v)])
            F.add_clause([e(u, v), -q(i, v), -q(j, u)])

    # every vertex v has exactly one colour
    F.force_complete_mapping(r)
    F.force_functional_mapping(r)

    # neighbours have distinct colours
    for u, v in e.indices():
        for ell in r.range():
            F.add_clause([-e(u, v), -r(u, ell), -r(v, ell)])
    return F
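
# --- Usage sketch (editor's addition, not part of the upstream file) ---
# Builds the classically hard instance with k = c + 1. Serialising via
# `to_dimacs()` is an assumption about the CNF API of this cnfgen version;
# adjust the call if the serialiser is named differently.
if __name__ == "__main__":
    F = CliqueColoring(6, 4, 3)  # 4-clique vs 3-coloring: unsatisfiable
    print(F.to_dimacs())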
PypiClean
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/ishop/static/cubane/ishop/js/default.js
(function(){
"use strict";


cubane.require('cubane.dialog');


if ( window['ishop'] === undefined ) window.ishop = {};


/*******************************************************************************
 * Calculate and round price and convert from net to gross based on current vat
 * rate.
 ******************************************************************************/
ishop.PriceInputController = function(url, vat, net, gross, price_calculation) {
    this.init(url, vat, net, gross, price_calculation);
};

ishop.PriceInputController.prototype = {
    PRICE_CALCULATION: {
        GROSS: 'gross',
        GROSS_ONLY: 'gross-only',
        NET: 'net'
    },


    init: function(url, vat, net, gross, price_calculation) {
        this._bound = {
            onVatChanged: $.proxy(this.onVatChanged, this),
            onNetChanged: $.proxy(this.onNetChanged, this),
            onGrossChanged: $.proxy(this.onGrossChanged, this)
        };

        this._url = url;
        this._vat = vat;
        this._net = net;
        this._gross = gross;
        this._price_calculation = price_calculation;

        this._vat.bind('change', this._bound.onVatChanged);
        this._net.bind('change', this._bound.onNetChanged);
        this._gross.bind('change', this._bound.onGrossChanged);
    },


    dispose: function() {
        this._vat.unbind('change', this._bound.onVatChanged);
        this._net.unbind('change keyup', this._bound.onNetChanged);
        this._gross.unbind('change keyup', this._bound.onGrossChanged);

        this._bound = null;
        this._url = null;
        this._vat = null;
        this._net = null;
        this._gross = null;
        this._price_calculation = null;
    },


    onVatChanged: function() {
        if ( this._price_calculation == this.PRICE_CALCULATION.GROSS ) {
            this.update($('#id_net_price').val(), null);
        } else if ( this._price_calculation == this.PRICE_CALCULATION.NET ) {
            this.update(null, $('#id_gross_price').val());
        }
    },


    onNetChanged: function() {
        this.update($('#id_net_price').val(), null);
    },


    onGrossChanged: function() {
        this.update(null, $('#id_gross_price').val());
    },


    update: function(net, gross) {
        var data = {
            vat: $('#id_vat').val()
        };

        if ( net !== null ) data['net'] = net;
        if ( gross !== null ) data['gross'] = gross;

        $.post(this._url, data, $.proxy(function(json) {
            $('#id_net_price').val(json.net);
            $('#id_gross_price').val(json.gross);
        }, this), 'json');
    }
};


/*******************************************************************************
 * Round price values after changing them
 ******************************************************************************/
ishop.PriceRounder = function(url, elements) {
    this.init(url, elements);
};

ishop.PriceRounder.prototype = {
    init: function(url, elements) {
        this._bound = {
            onChanged: $.proxy(this.onChanged, this)
        };

        this._url = url;
        this._elements = elements;
        this._elements.bind('change', this._bound.onChanged);
    },


    dispose: function() {
        this._elements.unbind('change', this._bound.onChanged);
        this._url = null;
        this._elements = null;
        this._bound = null;
    },


    onChanged: function(e) {
        var input = $(e.target);
        $.post(this._url, { value: input.val() }, function(json) {
            input.val(json.value);
        }, 'json');
    }
};


/*******************************************************************************
 * Stock level controller
 ******************************************************************************/
ishop.StockLevelController = function () {
    this.init();
};

ishop.StockLevelController.prototype = {
    init: function () {
        this._bound = {
            onStockChanged: $.proxy(this.onStockChanged, this)
        };

        $('.stock').bind('change', this._bound.onStockChanged);
        this.updateUIState();
    },


    dispose: function () {
        $('.stock').unbind('change', this._bound.onStockChanged);
        this._bound = null;
    },


    onStockChanged: function (e) {
        var select = $(e.target);
        this.updateUIState(select);
    },


    updateUIState: function(select) {
        if ( !select ) {
            var elements = $('.stock');
            for ( var i = 0; i < elements.length; i++ ) {
                this.updateUIState(elements.eq(i));
            }
        } else {
            var level = select.closest('tr').find('.stocklevel');
            if ( select.val() == '3' ) {
                level.css('opacity', 1).attr('disabled', false);
            } else {
                level.css('opacity', 0.3).attr('disabled', true);
            }
        }
    }
};


/*
 * US States
 */
$(document).ready(function() {
    var usstates = new innershed.USStates();
    var usstates_delivery = new innershed.USStates(
        $('#id_delivery_country'),
        $('#id_delivery_county'),
        $('#id_delivery_postcode')
    );
});


/*
 * Shop Data Import
 */
$(document).ready(function() {
    $('.cubane-ishop-dataimport-form form').on('submit', function() {
        cubane.dialog.working('Importing data', 'This process may take a minute, Please Wait...');
    });
});


}());
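
/*
 * Usage sketch (editor's addition, not part of the upstream file).
 * Shows how the controllers above could be wired up on page load. The
 * endpoint URL is a hypothetical placeholder; the element ids match the
 * ones hard-coded in PriceInputController. Kept commented out so it does
 * not double-bind handlers on pages that already instantiate them.
 */
// $(document).ready(function() {
//     var prices = new ishop.PriceInputController(
//         '/shop/admin/price/',  // hypothetical endpoint returning {net: ..., gross: ...}
//         $('#id_vat'),
//         $('#id_net_price'),
//         $('#id_gross_price'),
//         'gross'
//     );
//     var stock = new ishop.StockLevelController();
// });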
PypiClean