/FuzzyClassificator-1.3.84-py3-none-any.whl/pybrain/rl/environments/functions/function.py
from pybrain.utilities import setAllArgs

__author__ = 'Tom Schaul, [email protected]'

from scipy import zeros, array, ndarray

from pybrain.rl.environments import Environment
from pybrain.structure.parametercontainer import ParameterContainer
from pybrain.rl.environments.fitnessevaluator import FitnessEvaluator


class FunctionEnvironment(Environment, FitnessEvaluator):
    """ An n-to-1 mapping function with a single minimum of value zero, at xopt. """

    # what input dimensions can the function have?
    xdimMin = 1
    xdimMax = None
    xdim = None

    # the (single) point where f = 0
    xopt = None

    # what would be the desired performance? by default: something close to zero
    desiredValue = 1e-10

    toBeMinimized = True

    # does the function already include a penalization term, to keep search near the origin?
    penalized = False

    def __init__(self, xdim=None, xopt=None, xbound=5, feasible=True, constrained=False, violation=False, **args):
        self.feasible = feasible
        self.constrained = constrained
        self.violation = violation
        self.xbound = xbound
        if xdim is None:
            xdim = self.xdim
        if xdim is None:
            xdim = self.xdimMin
        assert xdim >= self.xdimMin and not (self.xdimMax is not None and xdim > self.xdimMax)
        self.xdim = xdim
        if xopt is None:
            self.xopt = zeros(self.xdim)
        else:
            self.xopt = xopt
        setAllArgs(self, args)
        self.reset()

    def __call__(self, x):
        if isinstance(x, ParameterContainer):
            x = x.params
        assert type(x) == ndarray, 'FunctionEnvironment: Input not understood: ' + str(type(x))
        return self.f(x)

    # methods for conforming to the Environment interface:

    def reset(self):
        self.result = None

    def getSensors(self):
        """ the one sensor is the function result. """
        tmp = self.result
        assert tmp is not None
        self.result = None
        return array([tmp])

    def performAction(self, action):
        """ the action is an array of values for the function """
        self.result = self(action)

    @property
    def indim(self):
        return self.xdim

    # does not provide any observations
    outdim = 0
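The class above defers the actual mapping to an undefined method f: a subclass only needs to supply it, and __call__, performAction, and getSensors all route through that method. A minimal illustrative sketch follows (not part of the package; SphereFunction and the NumPy import are assumptions for illustration):

# Hypothetical subclass for illustration only -- not part of pybrain.
import numpy as np

class SphereFunction(FunctionEnvironment):
    """f(x) = sum((x - xopt)**2): a single minimum of value zero at xopt."""
    def f(self, x):
        return np.sum((x - self.xopt) ** 2)

env = SphereFunction(xdim=3)
print(env(np.array([1.0, 2.0, 3.0])))          # 14.0, dispatched via __call__ -> f
env.performAction(np.array([0.0, 0.0, 0.0]))   # stores the function value as the result
print(env.getSensors())                        # array([ 0.]) -- the stored result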
PypiClean
/EveDiscordBot-0.1.1.tar.gz/EveDiscordBot-0.1.1/evediscordbot/dbCalls.py
import mysql.connector
import datetime
import os

host_var = os.environ['host_var']
username_var = os.environ['username_var']
pass_var = os.environ['pass_var']


def opendb():
    '''Helper function that opens database connection'''
    return mysql.connector.connect(host=host_var, user=username_var, password=pass_var)


def enterStrike(userID, username):
    '''enters a strike for the specified user'''
    mydb = opendb()
    time = datetime.datetime.now()
    print(time)
    cursor = mydb.cursor()
    sql = "INSERT INTO evelynnmains.striketable(userid,strikeDate,username) VALUES (%s,%s,%s) "
    inserttuple = (userID, time, username)
    cursor.execute(sql, inserttuple)
    mydb.commit()
    print("done")
    mydb.close()


def clearStrikes(userID):
    '''clears ALL strikes for the specified user'''
    mydb = opendb()
    cursor = mydb.cursor()
    sql = "DELETE FROM evelynnmains.striketable WHERE userid = %s"
    user = (userID,)
    cursor.execute(sql, user)
    mydb.commit()
    mydb.close()


def getallStrikes():
    '''returns strikes for all users in the database as a markdown formatted string'''
    mydb = opendb()
    cursor = mydb.cursor()
    sql = "SELECT username,COUNT(userid),MAX(strikeDate) FROM evelynnmains.striketable GROUP BY userid ORDER BY COUNT(userid) ASC "
    cursor.execute(sql)
    result = cursor.fetchall()
    outstr = '| Username | Strikes | Latest Strike (CST) |\n|--------------------------------|---------|---------------------|'
    for i in result:
        y = 31 - len(i[0])
        namespace = " " * y
        outstr = outstr + (('\n| {}| {} | {} |').format(i[0] + namespace, i[1], i[2]))
    mydb.close()
    return outstr


def getStrikes(userID):
    '''returns the strike count for the specified user'''
    mydb = opendb()
    cursor = mydb.cursor()
    sql = "SELECT COUNT(userid) FROM evelynnmains.striketable WHERE userid= %s"
    user = (userID,)
    cursor.execute(sql, user)
    result = cursor.fetchall()
    count = result[0][0]
    # close the connection before returning (the original returned first, leaving it open)
    mydb.close()
    return count
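The module reads its MySQL credentials from environment variables at import time and assumes an existing evelynnmains.striketable table with userid, strikeDate, and username columns. A hypothetical usage sketch (placeholder credentials and user ID, not taken from the package):

# Placeholder host/user/password -- a MySQL server with the
# evelynnmains.striketable table must already be available.
import os
os.environ['host_var'] = 'localhost'
os.environ['username_var'] = 'botuser'
os.environ['pass_var'] = 'secret'

from evediscordbot import dbCalls   # the environment variables must be set before this import

dbCalls.enterStrike(123456789, 'ExampleUser')
print(dbCalls.getStrikes(123456789))    # strike count for that user
print(dbCalls.getallStrikes())          # markdown table of strikes per user
dbCalls.clearStrikes(123456789)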
PypiClean
/IST411Team1AduDarko-1.0.0.tar.gz/IST411Team1AduDarko-1.0.0/IST411Team1/node3.py
import Pyro4, subprocess, zlib, time, pysftp, json, hashlib, p2p
from node5 import Node5

bool = False
beginTime = time.clock()


@Pyro4.expose
class node3:

    def __init__(self, name, daemon):
        self.name = name
        self.json = None
        self.daemon = daemon
        self.crcChecksum = None
        self.recSFTPChecksum = None

    """Getting the json"""
    def get_json(self):
        compJson = self.compressPayload(self.json)
        print("Compressed message: ", compJson)
        bool = True
        return compJson

    """Method to handle daemon shutdown"""
    def shutDown(self):
        shutDown = False
        try:
            self.daemon.shutdown()
            shutDown = True
        except Exception as e:
            print(e)
        return shutDown

    """Compressing Payload"""
    def compressPayload(self, data):
        try:
            print(self.name, " is compressing payload...")
            payloadComp = zlib.compress(data)
            log = {"Node": self.name, "Compressed payload": str(payloadComp)}
            Node5.log(log)
            return payloadComp
        except Exception as e:
            print(e)

    """Generating Checksum for Compressed Payload"""
    def genCrcChecksum(self, data):
        try:
            print(self.name, " is generating checksum...")
            checksum = zlib.crc32(data)
            print(self.name, " checksum: ", checksum)
            log = {"Node": self.name, "CRC Checksum": str(checksum)}
            Node5.log(log)
            return checksum
        except Exception as e:
            print(e)

    """Getting the Checksum"""
    def getChecksum(self):
        return self.crcChecksum

    """Receiving the payload via SFTP"""
    def receiveSFTPPayload(self):
        try:
            print(self.name, " is retrieving payload from remote directory via SFTP...")
            payload = None
            cnopts = pysftp.CnOpts()
            cnopts.hostkeys = None
            cinfo = {'cnopts': cnopts, 'host': 'oz-ist-linux-fa17-411', 'username': 'ftpuser', 'password': 'test1234', 'port': 101}
            with pysftp.Connection(**cinfo) as sftp:
                sftp.get('/home/ftpuser/Team1SFTPpayload.json', 'Team1SFTPReceived.json')
            with open('Team1SFTPReceived.json', 'r') as inFile:
                payload = json.load(inFile)
            payload = payload.encode('utf-8')
            log = {"Name": self.name, "Payload received via SFTP": str(payload)}
            Node5.log(log)
            return payload
        except Exception as e:
            print(e)

    """Receiving the Payload's Checksum via SFTP"""
    def receiveSFTPChecksum(self):
        try:
            print(self.name, " is retrieving payload's checksum from remote directory via SFTP...")
            checksum = None
            cnopts = pysftp.CnOpts()
            cnopts.hostkeys = None
            cinfo = {'cnopts': cnopts, 'host': 'oz-ist-linux-fa17-411', 'username': 'ftpuser', 'password': 'test1234', 'port': 101}
            with pysftp.Connection(**cinfo) as sftp:
                sftp.get('/home/ftpuser/Team1SFTPchecksum.txt', 'Team1ChecksumReceived.txt')
            with open('Team1ChecksumReceived.txt', 'r') as inFile:
                checksum = inFile.read()
            log = {"Node": self.name, "Checksum received via SFTP": str(checksum)}
            Node5.log(log)
            return checksum
        except Exception as e:
            print(e)

    """Authenticating Payload by Checking to see if Checksums match"""
    def verifySFTPChecksum(self, checksum, payload):
        verifyPerformed = False
        try:
            checksumOfPayload = hashlib.sha256(payload).hexdigest()
            print(checksumOfPayload)
            print(checksum)
            if checksumOfPayload == checksum:
                print("Checksum of payload received from Node 2 via SFTP verified.")
                log = {"Node": self.name, "Checksum received via SFTP is verified": "True", "Checksum Received": str(checksum), "Checksum Generated for matching": str(checksumOfPayload)}
                Node5.log(log)
            else:
                print("Payload received from Node 2 via SFTP has been compromised.")
            verifyPerformed = True
        except Exception as e:
            print(e)
        return verifyPerformed


if __name__ == '__main__':
    print("Starting Node 3...")
    daemon = Pyro4.Daemon()
    node3 = node3("Node 3", daemon)
    node3.json = node3.receiveSFTPPayload()
    node3.recSFTPChecksum = node3.receiveSFTPChecksum()
    node3.verifySFTPChecksum(node3.recSFTPChecksum, node3.json)
    node3.crcChecksum = node3.genCrcChecksum(node3.json)
    uri = node3.daemon.register(node3)
    print(node3.name + "'s uri: ", uri)
    print(node3.name, " is ready for remote access via Pyro4.")
    p2pTime = p2p.end(beginTime)
    log = {"Node": node3.name, "P2P payload time in seconds": p2pTime}
    Node5.log(log)
    print(node3.name, " to Node 4 payload time: ", p2pTime, " seconds")
    subprocess.Popen(['python3', 'node4.py', str(uri)])
    node3.daemon.requestLoop()
PypiClean
/DendroPy_calver-2023.330.2-py3-none-any.whl/dendropy/datamodel/charstatemodel.py
############################################################################## ## DendroPy Phylogenetic Computing Library. ## ## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder. ## All rights reserved. ## ## See "LICENSE.rst" for terms and conditions of usage. ## ## If you use this work or any portion thereof in published work, ## please cite it as: ## ## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library ## for phylogenetic computing. Bioinformatics 26: 1569-1571. ## ############################################################################## """ Character state definitions and alphabets. Certain state alphabets, such as DNA, RNA, protein, etc. are defined here. These are termed "fixed" state alphabets, and for each distinct state alphabet concept (e.g., DNA), there is one and only one instance of a representation of that concept (i.e., all DNA-type data in DendroPy, regardless of the source, refer to the same instance of the state alphabet and state alphabet elements). """ import collections import itertools from dendropy.datamodel import basemodel from dendropy.utility import textprocessing from dendropy.utility import container ############################################################################### ## StateAlphabet class StateAlphabet( basemodel.DataObject, basemodel.Annotable): """ A master registry mapping state symbols to their definitions. There are two classes or "denominations" of states: - fundamental states These are the basic, atomic, self-contained states of the alphabet, distinct and mutually-exclusive from every other fundamental state. E.g., for DNA: adenine, guanine, cytosine, and thymine. - multi-state states The states are second-level or "pseudo-states", in that they are not properly states in and of themselves, but rather each consist of a set of other states. That is, a multi-state state is a set of two or more fundamental states. Multi-state states are of one of two types: "ambiguous" and "polymorphic" states. "Ambiguous" states represent states in which the true fundamental state is unknown, but consists of one of the fundamental states to which the ambiguous states map. "Polymorphic" states represent states in which the entity actually has multiple fundamental states simultaneously. "Ambiguous" states are an expression of uncertainty or lack of knowledge about the identity of state. With "polymorphic" states, on the other hand, there is no uncertaintly or lack of knowledge about the state: the state is known definitively, and it consists of multiple fundamental states. An example of an ambiguous state would be 'N', representing any base in molecular sequence data. An example of a polymorphic state would be the range of a widespread species found in multiple geographic units. Note that multi-state states can be specified in terms of other multi-state states, but that upon instantiation, these member multi-states will be expanded to their fundamental states. State definitions or identities are immutable: their symbology and mappings cannot be changed after creation/initialization. State definitions and identities, however, can be added/removed from a state alphabet. Parameters ---------- label : string, optional The name for this state alphabet. fundamental_states : iterable of strings An iterable of symbols defining the fundamental (i.e., non-ambiguous and non-polymorphic states of this alphabet), with a 1-to-1 correspodence between symbols and states. Each state will also be automatically indexed base on its position in this list. 
For DNA, this would be something like: ``'ACGT'`` or ``('A', 'C', 'G', T')``. For "standard" characters, this would be something like ``'01'`` or ``('0', '1')``. no_data_symbol : string If specified, automatically creates a "no data" ambiguous state, represented by the (canonical, or primary) symbol "no_data_symbol", which maps to all fundamental states. This will also insert |None| into all symbol look-up maps, which, when dereferenced will return this state. Furthermore, the attribute ``self.no_data_symbol`` will return this symbol and ``self.no_data_state`` will return this state. The 'no data' state will be an ambiguous multistate type. ambiguous_states : iterable of tuples An iterable consisting of tuples expressing ambiguous state symbols and the set of symbols representing the fundamental states to which they map. The first element in the tuple is the symbol used to represent the ambiguous state; this can be blank (""), but if not blank it needs to be unique across all symbols (including case-variants if the state alphabet is case-insensitive). The second element is an iterable of fundamental state symbols to which this ambiguous state maps. The fundamental state symbols *must* have already been defined, i.e. given in the value passed to ``fundamental_states``. Note: a dictionary may seem like a more tractable structure than iterable of tuples, but we may need to specify multiple anonymous or blank ambiguous states. polymorphic_states : iterable of tuples An iterable consisting of tuples expressing polymorphic state symbols and the set of symbols representing the fundamental states to which they map. The first element in the tuple is the symbol used to represent the polymorphic state; this can be blank (""), but if not blank it needs to be unique across all symbols (including case-variants if the state alphabet is case-insensitive). The second element is an iterable of fundamental state symbols to which this polymorphic state maps. The fundamental state symbols *must* have already been defined, i.e. given in the value passed to ``fundamental_states``. Note: a dictionary may seem like a more tractable structure than iterable of tuples, but we may need to specify multiple anonymous or blank polymorphic states. symbol_synonyms : dictionary A mapping of symbols, with keys being the new symbols and values being (already-defined) symbols of states to which they map. This provides a mechanism by which states with multiple symbols can be managed. For example, an ambiguous state, "unknown", representing all fundamental states might be defined with '?' as its primary symbol, and a synonym symbol for this state might be 'X'. 
""" ########################################################################### ### CLass-level Constants FUNDAMENTAL_STATE = 0 AMBIGUOUS_STATE = 1 POLYMORPHIC_STATE = 2 ########################################################################### ### Life-Cycle and Identity def __init__(self, fundamental_states=None, ambiguous_states=None, polymorphic_states=None, symbol_synonyms=None, no_data_symbol=None, gap_symbol=None, label=None, case_sensitive=True): basemodel.DataObject.__init__(self, label=label) self._is_case_sensitive = case_sensitive # Core collection underlying alphabet self._fundamental_states = [] self._ambiguous_states = [] self._polymorphic_states = [] # Look-up mappings self._state_identities = None self._canonical_state_symbols = None self._canonical_symbol_state_map = None self._full_symbol_state_map = None self._index_state_map = None self._fundamental_states_to_ambiguous_state_map = None self._fundamental_states_to_polymorphic_state_map = None # Suppress for initialization self.autocompile_lookup_tables = False # This identifies the gap state when compiling the state alphabet. The # principle purpose behind this is to be able to tell the gap state # that it is, indeed, a gap state. And the purpose of this, in turn, # is so that the when the gap state is asked for its fundamental # indexes, it will return the fundamental indexes of the missing data # state in its place if it is *NOT* to be treated as a fifth # fundamental state. self.gap_state = None self._gap_symbol = None self.no_data_state = None self._no_data_symbol = None # Populate core collection if fundamental_states: for symbol in fundamental_states: self.new_fundamental_state(symbol) if gap_symbol: self.gap_state = self.new_fundamental_state(gap_symbol) self._gap_symbol = gap_symbol if no_data_symbol: self.no_data_state = self.new_ambiguous_state( symbol=no_data_symbol, member_states=self._fundamental_states) self._no_data_symbol = no_data_symbol if ambiguous_states: for ss in ambiguous_states: self.new_ambiguous_state(symbol=ss[0], member_state_symbols=ss[1]) if polymorphic_states: for ss in polymorphic_states: self.new_polymorphic_state(symbol=ss[0], member_state_symbols=ss[1]) if symbol_synonyms: for k in symbol_synonyms: self.new_symbol_synonym(k, symbol_synonyms[k]) # Build mappings self.compile_lookup_mappings() # Post-initialization self.autocompile_lookup_tables = True def __hash__(self): return id(self) def __eq__(self, other): return other is self def __copy__(self, memo=None): return self def taxon_namespace_scoped_copy(self, memo=None): return self def __deepcopy__(self, memo=None): return self ########################################################################### ### Symbol Management def _direct_get_state_for_symbol(self, symbol): """ Returns the |StateIdentity| instance corresponding to ``symbol``. """ for state_symbol, state in self.symbol_state_pair_iter(include_synonyms=True): if state_symbol == symbol: return state raise KeyError(symbol) def _direct_get_fundamental_states_for_symbols(self, symbols): """ Returns the list of |StateIdentity| instances corresponding to the iterable of symbols given by ``symbols``, with each element in ``symbols`` corresponding to a single symbol. 
""" ss = [] for symbol in symbols: state = self._direct_get_state_for_symbol(symbol) ss.extend(state.fundamental_states) return tuple(ss) def _validate_new_symbol(self, symbol): if symbol is None or symbol == "": raise ValueError("Cannot validate empty symbol") symbol = str(symbol) for state_symbol, state in self.symbol_state_pair_iter(include_synonyms=True): if state_symbol == symbol: raise ValueError("State with symbol or symbol synonym of '{}' already defined is this alphabet".format(symbol)) return symbol def new_fundamental_state(self, symbol): """ Adds a new fundamental state to the collection of states in this alphabet. Parameters ---------- symbol : string The symbol used to represent this state. Cannot have previously been used to refer to any other state, fundamental or otherwise, as a primary or synonymous symbol (including implicit synonyms given by case-variants if the state alphabet is not case-sensitive). Cannot be blank ("") or |None|. Returns ------- s : |StateIdentity| The new state created and added. """ if symbol is None or symbol == "": raise ValueError("Fundamental states cannot be defined without a valid symbol") symbol = self._validate_new_symbol(symbol) index = len(self._fundamental_states) new_state = StateIdentity( symbol=symbol, index=index, state_denomination=StateAlphabet.FUNDAMENTAL_STATE, member_states=None) self._fundamental_states.append(new_state) if not self._is_case_sensitive: for s in (symbol.upper(), symbol.lower()): if s != symbol: self.new_symbol_synonym(s, symbol) if self.autocompile_lookup_tables: self.compile_symbol_lookup_mappings() return new_state def new_ambiguous_state(self, symbol, **kwargs): """ Adds a new ambiguous state to the collection of states in this alphabet. Parameters ---------- symbol : string or None The symbol used to represent this state. Cannot have previously been used to refer to any other state, fundamental or otherwise, as a primary or synonymous symbol (including implicit synonyms given by case-variants if the state alphabet is not case-sensitive). Can be blank ("") or |None| if there. \*\*kwargs : keyword arguments, mandatory Exactly one of the following must be specified: member_state_symbols : iterable of strings List of symbols representing states to which this state maps. Symbols representing multistates will taken to refer to the set of fundamental states to which they, in turn, map. member_states : iterable of |StateIdentity| objects List of |StateIdentity| representing states to which this state maps. Returns ------- s : |StateIdentity| The new state created and added. """ return self.new_multistate( symbol=symbol, state_denomination=StateAlphabet.AMBIGUOUS_STATE, **kwargs) def new_polymorphic_state(self, symbol, **kwargs): """ Adds a new polymorphic state to the collection of states in this alphabet. Parameters ---------- symbol : string or None The symbol used to represent this state. Cannot have previously been used to refer to any other state, fundamental or otherwise, as a primary or synonymous symbol (including implicit synonyms given by case-variants if the state alphabet is not case-sensitive). Can be blank ("") or |None| if there. \*\*kwargs : keyword arguments, mandatory Exactly one of the following must be specified: member_state_symbols : iterable of strings List of symbols representing states to which this state maps. Symbols representing multistates will taken to refer to the set of fundamental states to which they, in turn, map. 
member_states : iterable of |StateIdentity| objects List of |StateIdentity| representing states to which this state maps. Returns ------- s : |StateIdentity| The new state created and added. """ return self.new_multistate( symbol=symbol, state_denomination=StateAlphabet.POLYMORPHIC_STATE, **kwargs) def new_multistate(self, symbol, state_denomination, **kwargs): """ Adds a new polymorphic or ambiguous state to the collection of states in this alphabet. Parameters ---------- symbol : string or None The symbol used to represent this state. Cannot have previously been used to refer to any other state, fundamental or otherwise, as a primary or synonymous symbol (including implicit synonyms given by case-variants if the state alphabet is not case-sensitive). Can be blank ("") or |None| if there. state_denomination : enum StateAlphabet.POLYMORPHIC_STATE or StateAlphabet.AMBIGUOUS_STATE \*\*kwargs : keyword arguments, mandatory Exactly one of the following must be specified: member_state_symbols : iterable of strings List of symbols representing states to which this state maps. Symbols representing multistates will taken to refer to the set of fundamental states to which they, in turn, map. member_states : iterable of |StateIdentity| objects List of |StateIdentity| representing states to which this state maps. Returns ------- s : |StateIdentity| The new state created and added. """ if symbol is not None and symbol != "": symbol = self._validate_new_symbol(symbol) if len(kwargs) != 1: raise TypeError("Exactly one of 'member_state_symbols' or 'member_states' is required") if "member_state_symbols" in kwargs: member_states = self._direct_get_fundamental_states_for_symbols(kwargs["member_state_symbols"]) elif "member_states" in kwargs: member_states = kwargs["member_states"] else: raise ValueError("Exactly one of 'member_state_symbols' or 'member_states' is required") new_state = StateIdentity( symbol=symbol, index=None, state_denomination=state_denomination, member_states=member_states) if state_denomination == StateAlphabet.POLYMORPHIC_STATE: self._polymorphic_states.append(new_state) elif state_denomination == StateAlphabet.AMBIGUOUS_STATE: self._ambiguous_states.append(new_state) else: raise ValueError(state_denomination) if symbol and not self._is_case_sensitive: for s in (symbol.upper(), symbol.lower()): if s != symbol: self.new_symbol_synonym(s, symbol) if self.autocompile_lookup_tables: if symbol: self.compile_symbol_lookup_mappings() self.compile_member_states_lookup_mappings() return new_state def new_symbol_synonym(self, symbol_synonym, referenced_symbol): """ Defines an alternative symbol mapping for an existing state. Parameters ---------- symbol_synonym : string The (new) alternative symbol. referenced_symbol : string The symbol for the state to which the alternative symbol will also map. Returns ------- s : |StateIdentity| The state to which this synonym maps. 
------ """ if symbol_synonym is None or symbol_synonym == "": raise ValueError("Symbol synonym cannot be empty") symbol_synonym = self._validate_new_symbol(symbol_synonym) state = self._direct_get_state_for_symbol(referenced_symbol) if symbol_synonym in state.symbol_synonyms: raise ValueError("Symbol synonym '{}' already defined for state '{}".format(symbol_synonym, state)) state.symbol_synonyms.append(symbol_synonym) if self.autocompile_lookup_tables: self.compile_symbol_lookup_mappings() return state ########################################################################### ### Optimization/Sugar: Lookup Mappings and Attribute Settings def compile_lookup_mappings(self): """ Builds lookup tables/mappings for quick referencing and dereferencing of symbols/states. """ self.compile_symbol_lookup_mappings() self.compile_member_states_lookup_mappings() def compile_member_states_lookup_mappings(self): """ Builds lookup tables/mappings for quick referencing and dereferencing of ambiguous/polymorphic states based on the fundamental states to which they map. """ temp_fundamental_states_to_ambiguous_state_map = {} temp_fundamental_states_to_polymorphic_state_map = {} if self.no_data_state is not None: assert self.no_data_state in self._ambiguous_states self.no_data_state.member_states = tuple(self.fundamental_state_iter()) for idx, state in enumerate(self.state_iter()): if state.state_denomination == StateAlphabet.AMBIGUOUS_STATE: member_states = frozenset(state.member_states) if member_states in temp_fundamental_states_to_ambiguous_state_map: pass # raise ValueError("Multiple definitions of ambiguous state with member states of '{}': {}, {}. Define a symbol synonym instead.".format( # state.member_states_str, temp_fundamental_states_to_ambiguous_state_map[member_states], state)) else: temp_fundamental_states_to_ambiguous_state_map[member_states] = state elif state.state_denomination == StateAlphabet.POLYMORPHIC_STATE: member_states = frozenset(state.member_states) if member_states in temp_fundamental_states_to_polymorphic_state_map: pass # raise ValueError("Multiple definitions of polymorphic state with member states of '{}': {}, {}. Define a symbol synonym instead.".format( # state.member_states_str, temp_fundamental_states_to_polymorphic_state_map[member_states], state)) else: temp_fundamental_states_to_polymorphic_state_map[member_states] = state self._fundamental_states_to_ambiguous_state_map = container.FrozenOrderedDict(temp_fundamental_states_to_ambiguous_state_map) self._fundamental_states_to_polymorphic_state_map = container.FrozenOrderedDict(temp_fundamental_states_to_polymorphic_state_map) def _set_symbol_mapping(self, d, symbol, state): if symbol is None or symbol == "": raise ValueError("Symbol synonym cannot be empty") assert symbol not in d d[symbol] = state def compile_symbol_lookup_mappings(self): """ Builds lookup tables/mappings for quick referencing and dereferencing of state symbology. 
""" temp_states = [] temp_symbols = [] temp_canonical_symbol_state_map = collections.OrderedDict() temp_full_symbol_state_map = collections.OrderedDict() temp_index_state_map = collections.OrderedDict() # if self._gap_symbol is not None and self.no_data_state is None: # self.no_data_state = self.new_ambiguous_state(symbol=None, # member_states=self._fundamental_states) if self.no_data_state is not None: assert self.no_data_symbol == self.no_data_state.symbol, "{} != {}".format(self.no_data_symbol, self.no_data_state.symbol) temp_full_symbol_state_map[None] = self.no_data_state for idx, state in enumerate(self.state_iter()): temp_states.append(state) if state.symbol: temp_symbols.append(state.symbol) assert state.symbol not in temp_canonical_symbol_state_map temp_canonical_symbol_state_map[state.symbol] = state self._set_symbol_mapping( temp_full_symbol_state_map, state.symbol, state) if state.symbol_synonyms: for ss in state.symbol_synonyms: self._set_symbol_mapping( temp_full_symbol_state_map, ss, state) else: assert state.state_denomination != StateAlphabet.FUNDAMENTAL_STATE state._index = idx if self.gap_state is not None and state is self.gap_state and self.no_data_state is not None: state.is_gap_state = True state.gap_state_as_no_data_state = self.no_data_state else: state.is_gap_state = False state.gap_state_as_no_data_state = None temp_index_state_map[idx] = state self._state_identities = tuple(temp_states) self._canonical_state_symbols = tuple(temp_symbols) self._canonical_symbol_state_map = container.FrozenOrderedDict(temp_canonical_symbol_state_map) self._full_symbol_state_map = container.FrozenOrderedDict(temp_full_symbol_state_map) self._index_state_map = container.FrozenOrderedDict(temp_index_state_map) def set_state_as_attribute(self, state, attr_name=None): """ Sets the given state as an attribute of this alphabet. The name of the attribute will be ``attr_name`` if specified, or the state symbol otherwise. Parameters ---------- state : |StateIdentity| The state to be made an attribute of this alphabet. attr_name : string The name of the attribute. If not specified, the state symbol will be used. """ if (state not in self._fundamental_states and state not in self._ambiguous_states and state not in self._polymorphic_states): raise ValueError("State {} not defined in current alphabet".format(state)) if attr_name is None: attr_name = state.symbol if attr_name is None: raise TypeError("Cannot set attribute: non-None symbol needed for state or non-None attribute name needs to be provided") setattr(self, attr_name, state) ########################################################################### ### Special handling to designate gap def _get_gap_symbol(self): return self._gap_symbol def _set_gap_symbol(self, gap_symbol): """ For state alphabets with no explicitly-defined gap and no data (missing) symbols, this method will allow creation of mapping of gaps to no data states, so that tree/data scoring methods that require gaps to be treated as missing data can be used. Note that the gap state needs to be already defined in the state alphabet and already associated with the designated symbol. 
""" if gap_symbol is not None: self.gap_state = self[gap_symbol] self._gap_symbol = gap_symbol else: self.gap_state = None self._gap_symbol = None gap_symbol = property(_get_gap_symbol, _set_gap_symbol) def _get_no_data_symbol(self): return self._no_data_symbol def _set_no_data_symbol(self, no_data_symbol): if no_data_symbol is not None: self.no_data_state = self[no_data_symbol] self._no_data_symbol = no_data_symbol else: self.no_data_state = None self._no_data_symbol = None no_data_symbol = property(_get_no_data_symbol, _set_no_data_symbol) ########################################################################### ### Symbol Access def __len__(self): """ Number of states. """ return ( len(self._fundamental_states) + len(self._ambiguous_states) + len(self._polymorphic_states) ) def __iter__(self): """ Returns :meth:`StateAlphabet.state_iter()`: iterator over all state identities. """ return self.state_iter() def state_iter(self): """ Returns an iterator over all state identities. """ return itertools.chain( self._fundamental_states, self._ambiguous_states, self._polymorphic_states) def fundamental_state_iter(self): """ Returns an iterator over all fundamental state identities. """ return itertools.chain(self._fundamental_states) def ambiguous_state_iter(self): """ Returns an iterator over all ambiguous state identities. """ return itertools.chain(self._ambiguous_states) def polymorphic_state_iter(self): """ Returns an iterator over all polymorphic state identities. """ return itertools.chain(self._polymorphic_states) def multistate_state_iter(self): """ Returns an iterator over all ambiguous and polymorphic state identities. """ return itertools.chain(self._ambiguous_states, self._polymorphic_states) def fundamental_symbol_iter(self, include_synonyms=True): """ Returns an iterator over all symbols (including synonyms, unless ``include_synonyms`` is |False|) that map to fundamental states. """ for state in self.fundamental_state_iter(): yield state.symbol if state.symbol_synonyms and include_synonyms: for symbol in state.symbol_synonyms: yield symbol def ambiguous_symbol_iter(self, include_synonyms=True): """ Returns an iterator over all symbols (including synonyms, unless ``include_synonyms`` is |False|) that map to ambiguous states. """ for state in self.ambiguous_state_iter(): yield state.symbol if state.symbol_synonyms and include_synonyms: for symbol in state.symbol_synonyms: yield symbol def polymorphic_symbol_iter(self, include_synonyms=True): """ Returns an iterator over all symbols (including synonyms, unless ``include_synonyms`` is |False|) that map to polymorphic states. """ for state in self.polymorphic_state_iter(): yield state.symbol if state.symbol_synonyms and include_synonyms: for symbol in state.symbol_synonyms: yield symbol def multistate_symbol_iter(self, include_synonyms=True): """ Returns an iterator over all symbols (including synonyms, unless ``include_synonyms`` is |False|) that map to multistate states. """ for state in self.multistate_state_iter(): yield state.symbol if state.symbol_synonyms and include_synonyms: for symbol in state.symbol_synonyms: yield symbol def symbol_state_pair_iter(self, include_synonyms=True): """ Returns an iterator over all symbols paired with the state to which the they symbols map. """ for state in self.state_iter(): yield (state.symbol, state) if include_synonyms: for synonym in state.symbol_synonyms: yield (synonym, state) def _get_state_identities(self): """ Tuple of all state identities in this alphabet. 
""" return self._state_identities states = property(_get_state_identities) def _get_canonical_state_symbols(self): """ Tuple of all state symbols in this alphabet. """ return self._canonical_state_symbols symbols = property(_get_canonical_state_symbols) def _get_canonical_symbol_state_map(self): """ Dictionary with state symbols as keys and states as values. Does not include symbol synonyms or case variations. """ return self._canonical_symbol_state_map canonical_symbol_state_map = property(_get_canonical_symbol_state_map) def _get_full_symbol_state_map(self): """ Dictionary with state symbols as keys and states as values. Includes symbol synonyms or case variations. """ return self._full_symbol_state_map full_symbol_state_map = property(_get_full_symbol_state_map) def __getitem__(self, key): """ Returns state identity corresponding to ``key``. Parameters ---------- key : integer or string If and integer value, looks up and returns state identity by index. If a string value, looks up and returns state identity by symbol. Returns ------- s : |StateIdentity| instance Returns a |StateIdentity| corresponding to ``key``. Raises ------ KeyError if ``key`` is not valid. """ if isinstance(key, int): return self._index_state_map[key] else: return self._full_symbol_state_map[key] def get_states_for_symbols(self, symbols): """ Returns list of states corresponding to symbols. Parameters ---------- symbols : iterable of symbols Returns ------- s : list of |StateIdentity| A list of |StateIdentity| instances corresponding to symbols given in ``symbols``. """ states = [self.full_symbol_state_map[s] for s in symbols] return states def get_fundamental_states_for_symbols(self, symbols): """ Returns list of *fundamental* states corresponding to symbols. Parameters ---------- symbols : iterable of symbols Returns ------- s : list of |StateIdentity| A list of fundamental |StateIdentity| instances corresponding to symbols given in ``symbols``, with multi-state states expanded into their fundamental symbols. """ states = [] for symbol in symbols: state = self._full_symbol_state_map[symbol] states.extend(state.fundamental_states) return states def get_canonical_symbol_for_symbol(self, symbol): """ Returns the canonical state symbol for the state to which ``symbol`` maps. E.g., in a DNA alphabet, return 'A' for 'a'. Parameters ---------- symbol : string Returns ------- s : string Canonical symbol for state with symbol or synonym symbol of ``symbol``. """ return self[symbol].symbol def match_ambiguous_state(self, symbols): """ Returns ambiguous state with fundamental member states represented by symbols given in ``symbols``. Parameters ---------- symbols : iterable of symbols Returns ------- s : |StateIdentity| instance """ states = frozenset(self.get_fundamental_states_for_symbols(symbols)) return self._fundamental_states_to_ambiguous_state_map[states] def match_polymorphic_state(self, symbols): """ Returns polymorphic state with fundamental member states represented by symbols given in ``symbols``. Parameters ---------- symbols : iterable of symbols Returns ------- s : |StateIdentity| instance """ states = frozenset(self.get_fundamental_states_for_symbols(symbols)) return self._fundamental_states_to_polymorphic_state_map[states] def match_state(self, symbols, state_denomination): """ Returns ambiguous or polymorphic state with fundamental member states represented by symbols given in ``symbols``. Parameters ---------- symbols : iterable of string symbols Symbols representing states to be dereferenced. 
state_denomination : {StateAlphabet.AMBIGUOUS or StateAlphabet.POLYPMORPHIC_STATE} Returns ------- s : |StateIdentity| instance """ if state_denomination == StateAlphabet.AMBIGUOUS_STATE: return self.match_ambiguous_state(symbols=symbols) else: return self.match_polymorphic_state(symbols=symbols) ############################################################################### ## StateIdentity class StateIdentity( basemodel.DataObject, basemodel.Annotable): """ A character state definition, which can either be a fundamental state or a mapping to a set of other character states (for polymorphic or ambiguous characters). """ def __init__(self, symbol=None, index=None, state_denomination=StateAlphabet.FUNDAMENTAL_STATE, member_states=None): """ A state is immutable with respect to its definition and identity. Specifically, it 'symbol', 'index', 'multistate', and 'member_states' properties are set upon definition/creation, and after that are read-only. Parameters ---------- symbol : string A text symbol or token representation of this character state. E.g., 'G' for the base guanine in a DNA state alphabet, or '1' for presence of a wing in a morphological data set. index : integer The (0-based) numeric index for this state in the state alphabet. E.g., for a DNA alphabet: 0 = 'A'/adenine, 1 = 'C'/cytosine, 2 = 'G'/guanine, 3 = 'T'/thymine. Or for a "standard" alphabet: 0 = '0', 1 = '1'. Note that ambiguous and polymorphic state definitions typically are not indexed. state_denomination : 'enum' One of: ``StateAlphabet.FUNDAMENTAL_STATE``, ``StateAlphabet.AMBIGUOUS_STATE``, or ``StateAlphabet.POLYMORPHIC_STATE``. member_states : iterable of |StateIdentity| instances. If a multi-state, then a collection of |StateIdentity| instances to which this state maps. """ basemodel.DataObject.__init__(self, label=symbol) self._symbol = symbol self._index = index self._state_denomination = state_denomination self._member_states = None self._fundamental_states = None self._fundamental_symbols = None self._fundamental_indexes = None self._fundamental_indexes_with_gaps_as_missing = None self._partials_vector = None if member_states is not None: self._member_states = tuple(member_states) else: self._member_states = None self._str = None self._repr = None self._member_states_str = None self._symbol_synonyms = [] # special handling for treating gap states as missing/no-data states self.is_gap_state = None self.gap_state_as_no_data_state = None def __hash__(self): return id(self) def __eq__(self, other): return other is self def __copy__(self, memo=None): return self def taxon_namespace_scoped_copy(self, memo=None): return self def __deepcopy__(self, memo=None): return self def _get_index(self): return self._index index = property(_get_index) def __str__(self): if self._str is None: if self._symbol: self._str = str(self._symbol) elif self._state_denomination == StateAlphabet.FUNDAMENTAL_STATE: self._str = "" else: self._str = self.member_states_str return self._str def __repr__(self): if self._repr is None: s = str(self) self._repr = "<{} at {}: '{}'>".format(self.__class__.__name__, hex(id(self)), str(s)) return self._repr def _get_member_states_str(self): """ Representation of member states of self. 
""" if self._member_states_str is None: if self._state_denomination == StateAlphabet.FUNDAMENTAL_STATE: self._member_states_str = str(self) else: s = ",".join([m._symbol for m in self._member_states]) if self._state_denomination == StateAlphabet.AMBIGUOUS_STATE: self._member_states_str = "{" + s + "}" elif self._state_denomination == StateAlphabet.POLYMORPHIC_STATE: self._member_states_str = "(" + s + ")" else: raise ValueError("Unrecognized state denomination: '{}'".format(self._state_denomination)) return self._member_states_str member_states_str = property(_get_member_states_str) def _get_symbol(self): """ Canonical (primary) symbol of this state. """ return self._symbol symbol = property(_get_symbol) def _get_state_denomination(self): """ Type of multi-statedness: FUNDAMENTAL (not a multistate), AMBIGUOUS, or POLYMORPHIC. """ return self._state_denomination state_denomination = property(_get_state_denomination) def _is_single_state(self): """ |True| if a FUNDAMENTAL state. """ return self._state_denomination == StateAlphabet.FUNDAMENTAL_STATE is_single_state = property(_is_single_state) def _is_fundamental_state(self): """ |True| if a FUNDAMENTAL state. """ return self._state_denomination == StateAlphabet.FUNDAMENTAL_STATE is_fundamental_state = property(_is_fundamental_state) def _get_member_states(self): """ Returns the (fundamental) member states that this state maps to if not itself a fundamental state. """ return self._member_states def _set_member_states(self, member_states): """ Rebuilds member state set. """ if member_states is not None: self._member_states = tuple(member_states) else: self._member_states = None self._fundamental_states = None self._fundamental_symbols = None self._fundamental_indexes = None self._fundamental_indexes_with_gaps_as_missing = None self._partials_vector = None self._str = None self._repr = None self._member_states_str = None member_states = property(_get_member_states, _set_member_states) def _get_fundamental_states(self): """ Returns a tuple of fundamental states (i.e., tupe of single states) to which this state maps. """ if self._fundamental_states is None: if self._member_states is None: states = {self:True} else: states = collections.OrderedDict() for state in self._member_states: assert state is not self for s in state.fundamental_states: states[s] = True self._fundamental_states = tuple(states.keys()) return self._fundamental_states fundamental_states = property(_get_fundamental_states) def _get_fundamental_symbols(self): """ Returns a tuple of fundamental state symbols (i.e., tuple of symbols representing single states) to which this state maps. """ if self._fundamental_symbols is None: self._fundamental_symbols = tuple(state.symbol for state in self.fundamental_states) return self._fundamental_symbols fundamental_symbols = property(_get_fundamental_symbols) def _get_fundamental_indexes(self): """ Returns a tuple of fundamental state indexes (i.e., tuple of index values of single states) to which this state maps. """ if self._fundamental_indexes is None: self._fundamental_indexes = tuple([state._index for state in self.fundamental_states]) return self._fundamental_indexes fundamental_indexes = property(_get_fundamental_indexes) def _get_fundamental_indexes_with_gaps_as_missing(self): """ Returns a tuple of fundamental state indexes (i.e., tuple of index values of single states) to which this state maps, with gaps being substituted with missing (no-data) states. 
""" if self._fundamental_indexes_with_gaps_as_missing is None: if self.is_gap_state: if self.gap_state_as_no_data_state is not None: self._fundamental_indexes_with_gaps_as_missing = tuple(self.gap_state_as_no_data_state.fundamental_indexes_with_gaps_as_missing) else: raise ValueError("No data state not specified") else: fstates = [s for s in self.fundamental_states if not s.is_gap_state] self._fundamental_indexes_with_gaps_as_missing = tuple([s._index for s in fstates]) return self._fundamental_indexes_with_gaps_as_missing fundamental_indexes_with_gaps_as_missing = property(_get_fundamental_indexes_with_gaps_as_missing) def _get_symbol_synonyms(self): """ The collection of symbol synonyms (alternatives/equivalents to the canonical symbol) which also map to this state. """ return self._symbol_synonyms # def _set_symbol_synonyms(self, value): # self._symbol_synonyms = value # symbol_synonyms = property(_get_symbol_synonyms, _set_symbol_synonyms) symbol_synonyms = property(_get_symbol_synonyms) def is_exact_correspondence(self, other): """ Tries to determine if two StateIdentity definitions are equivalent by matching symbols. """ match = True if self._state_denomination != other._state_denomination: return False if self._state_denomination != StateAlphabet.FUNDAMENTAL_STATE and other._state_denomination != StateAlphabet.FUNDAMENTAL_STATE: xf1 = self.fundamental_states xf2 = other.fundamental_states if len(xf1) != len(xf2): match = False else: f1 = set(xf1) f2 = set(xf2) for m1 in f1: member_match = False for m2 in f2: if m1.is_exact_correspondence(m2): member_match = True f2.remove(m2) break if not member_match: match = False break if match: f1 = set(xf1) f2 = set(xf2) for m2 in f2: member_match = False for m1 in f1: if m1.is_exact_correspondence(m2): f1.remove(m1) member_match = True break if not member_match: match = False break return match else: return self._symbol == other._symbol ############################################################################### ## DnaStateAlphabet class DnaStateAlphabet(StateAlphabet): def __init__(self): fundamental_states = "ACGT" polymorphic_states = None ambiguous_states = ( ("N", "ACGT"), ("R", "AG" ), ("Y", "CT" ), ("M", "AC" ), ("W", "AT" ), ("S", "CG" ), ("K", "GT" ), ("V", "ACG" ), ("H", "ACT" ), ("D", "AGT" ), ("B", "CGT" ), ) symbol_synonyms = {"X": "N"} StateAlphabet.__init__(self, fundamental_states=fundamental_states, no_data_symbol="?", gap_symbol="-", polymorphic_states=polymorphic_states, ambiguous_states=ambiguous_states, symbol_synonyms=symbol_synonyms, label="DNA", case_sensitive=False) for state in self.state_iter(): if state.symbol == "-": attr_name = "gap" else: attr_name = state.symbol self.set_state_as_attribute(state, attr_name) self.any_residue = self.N self.unknown_state_symbol = 'N' ############################################################################### ## RnaStateAlphabet class RnaStateAlphabet(StateAlphabet): def __init__(self): fundamental_states = "ACGU" polymorphic_states = None ambiguous_states = ( ("N", "ACGU"), ("R", "AG" ), ("Y", "CU" ), ("M", "AC" ), ("W", "AU" ), ("S", "CG" ), ("K", "GU" ), ("V", "ACG" ), ("H", "ACU" ), ("D", "AGU" ), ("B", "CGU" ), ) symbol_synonyms = {"X": "N"} StateAlphabet.__init__(self, fundamental_states=fundamental_states, no_data_symbol="?", gap_symbol="-", polymorphic_states=polymorphic_states, ambiguous_states=ambiguous_states, symbol_synonyms=symbol_synonyms, label="RNA", case_sensitive=False) for state in self.state_iter(): if state.symbol == "-": attr_name = "gap" else: attr_name 
= state.symbol self.set_state_as_attribute(state, attr_name) self.any_residue = self.N self.unknown_state_symbol = 'N' ############################################################################### ## NucleotideStateAlphabet class NucleotideStateAlphabet(StateAlphabet): def __init__(self): fundamental_states = "ACGTU" polymorphic_states = None ambiguous_states = ( ("N", "ACGTU"), ("R", "AG" ), ("Y", "CTU" ), ("M", "AC" ), ("W", "ATU" ), ("S", "CG" ), ("K", "GTU" ), ("V", "ACG" ), ("H", "ACTU" ), ("D", "AGTU" ), ("B", "CGTU" ), ) symbol_synonyms = {"X": "N"} StateAlphabet.__init__(self, fundamental_states=fundamental_states, no_data_symbol="?", gap_symbol="-", polymorphic_states=polymorphic_states, ambiguous_states=ambiguous_states, symbol_synonyms=symbol_synonyms, label="Nucleotide", case_sensitive=False) for state in self.state_iter(): if state.symbol == "-": attr_name = "gap" else: attr_name = state.symbol self.set_state_as_attribute(state, attr_name) self.any_residue = self.N self.unknown_state_symbol = 'N' ############################################################################### ## ProteinStateAlphabet class ProteinStateAlphabet(StateAlphabet): def __init__(self): fundamental_states = "ACDEFGHIKLMNPQRSTVWY*" polymorphic_states = None ambiguous_states = ( ("B", "DN"), ("Z", "EQ"), ("X", "ACDEFGHIKLMNPQRSTVWY*"), ) symbol_synonyms = {} StateAlphabet.__init__(self, fundamental_states=fundamental_states, no_data_symbol="?", gap_symbol="-", polymorphic_states=polymorphic_states, ambiguous_states=ambiguous_states, symbol_synonyms=symbol_synonyms, label="Protein", case_sensitive=False) for state in self.state_iter(): if state.symbol == "-": attr_name = "gap" elif state.symbol == "*": attr_name = "stop" else: attr_name = state.symbol self.set_state_as_attribute(state, attr_name) self.any_residue = self.X self.unknown_state_symbol = 'X' ############################################################################### ## BinaryStateAlphabet class BinaryStateAlphabet(StateAlphabet): def __init__(self, allow_gaps=False, allow_missing=False): fundamental_states = "10" if allow_gaps: gap_symbol = "-" else: gap_symbol = None polymorphic_states = None ambiguous_states = [] if allow_missing: no_data_symbol = "?" 
else: no_data_symbol = None symbol_synonyms = {} StateAlphabet.__init__(self, fundamental_states=fundamental_states, no_data_symbol=no_data_symbol, gap_symbol=gap_symbol, polymorphic_states=polymorphic_states, ambiguous_states=ambiguous_states, symbol_synonyms=symbol_synonyms, label="Binary", case_sensitive=False) for state in self.state_iter(): if state.symbol == "-": attr_name = "gap" elif state.symbol == "?": attr_name = "missing" elif state.symbol == "*": attr_name = "stop" else: attr_name = state.symbol self.set_state_as_attribute(state, attr_name) ############################################################################### ## RestrictionSitesStateAlphabet class RestrictionSitesStateAlphabet(BinaryStateAlphabet): def __init__(self, allow_gaps=False, allow_missing=False): BinaryStateAlphabet.__init__(self, allow_gaps=allow_gaps, allow_missing=allow_missing) ############################################################################### ## InfiniteSitesStateAlphabet class InfiniteSitesStateAlphabet(BinaryStateAlphabet): def __init__(self, allow_gaps=False, allow_missing=False): BinaryStateAlphabet.__init__(self, allow_gaps=allow_gaps, allow_missing=allow_missing) ############################################################################### ## GLOBAL STATE ALPHABETS DNA_STATE_ALPHABET = DnaStateAlphabet() RNA_STATE_ALPHABET = RnaStateAlphabet() NUCLEOTIDE_STATE_ALPHABET = NucleotideStateAlphabet() BINARY_STATE_ALPHABET = BinaryStateAlphabet() PROTEIN_STATE_ALPHABET = ProteinStateAlphabet() RESTRICTION_SITES_STATE_ALPHABET = RestrictionSitesStateAlphabet() INFINITE_SITES_STATE_ALPHABET = InfiniteSitesStateAlphabet() def new_standard_state_alphabet( fundamental_state_symbols=None, case_sensitive=False): if fundamental_state_symbols is None: fundamental_state_symbols = "0123456789" s = StateAlphabet( fundamental_states=fundamental_state_symbols, no_data_symbol="?", gap_symbol="-", # polymorphic_states=polymorphic_states, # ambiguous_states=ambiguous_states, # symbol_synonyms=symbol_synonyms, label="Standard", case_sensitive=case_sensitive) for state in s.state_iter(): if state.symbol == "-": attr_name = "gap" else: attr_name = state.symbol s.set_state_as_attribute(state, attr_name) return s ############################################################################### ## Convenience Functions def coerce_to_state_identities(state_alphabet, values): coerced_values = [] for v in values: if isinstance(v, StateIdentity): coerced_values.append(v) elif textprocessing.is_str_type(v) or isinstance(v, int): s = state_alphabet[v] coerced_values.append(s) else: raise ValueError(v) return coerced_values
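As a brief orientation (a sketch, not taken from the package's own documentation), the module-level alphabets defined at the end of this file can be used directly; the expected outputs in the comments follow from the code above:

# Illustrative usage of the DNA alphabet defined above.
from dendropy.datamodel.charstatemodel import DNA_STATE_ALPHABET as dna

a = dna["A"]                                      # __getitem__ looks up by symbol (or by integer index)
print(a.is_fundamental_state, a.index)            # True 0
print(dna.get_canonical_symbol_for_symbol("a"))   # 'A' -- the DNA alphabet is case-insensitive
r = dna.match_ambiguous_state("AG")               # ambiguous state whose member states are A and G
print(r.symbol, r.fundamental_symbols)            # R ('A', 'G')
print(dna.gap.is_gap_state)                       # True -- '-' was registered as the gap state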
PypiClean
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/Gyre-Pagella/Script/Regular/Main.js
MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.GyrePagellaMathJax_Script={directory:"Script/Regular",family:"GyrePagellaMathJax_Script",testString:"\u00A0\u210A\u210B\u2110\u2112\u211B\u212C\u212F\u2130\u2131\u2133\u2134\uD835\uDC9C\uD835\uDC9E\uD835\uDC9F",32:[0,0,250,0,0],160:[0,0,250,0,0],8458:[398,398,508,-62,562],8459:[798,18,790,55,844],8464:[787,8,577,75,647],8466:[755,8,789,52,842],8475:[764,8,905,55,958],8492:[764,8,887,47,940],8495:[398,8,406,21,459],8496:[757,8,569,55,623],8497:[802,8,754,55,835],8499:[783,73,940,47,1011],8500:[398,8,471,23,524],119964:[783,8,783,33,836],119966:[757,8,568,55,621],119967:[764,8,867,39,920],119970:[757,406,637,51,690],119973:[787,406,679,55,749],119974:[788,8,881,62,935],119977:[783,8,832,42,896],119978:[757,8,788,53,841],119979:[764,8,833,55,887],119980:[757,244,788,56,841],119982:[764,8,608,62,662],119983:[897,8,555,43,971],119984:[798,8,657,51,710],119985:[816,8,606,52,659],119986:[816,8,948,48,1001],119987:[757,8,672,60,725],119988:[798,406,649,51,702],119989:[764,14,673,47,732],119990:[398,8,567,23,620],119991:[788,8,465,18,519],119992:[398,8,406,21,459],119993:[788,8,567,23,620],119995:[788,390,247,-83,300],119997:[788,8,524,-10,577],119998:[632,8,244,30,298],119999:[632,398,215,-309,268],120000:[788,8,456,-10,510],120001:[788,8,265,17,319],120002:[398,8,753,12,806],120003:[398,8,520,12,573],120005:[398,398,485,-118,538],120006:[398,397,486,23,540],120007:[421,0,442,39,495],120008:[421,8,413,-26,467],120009:[655,8,286,23,339],120010:[390,8,540,39,593],120011:[420,8,491,39,545],120012:[420,8,649,39,702],120013:[398,8,488,25,541],120014:[390,398,530,-39,584],120015:[404,8,437,-13,490],120016:[785,10,796,30,853],120017:[767,10,913,44,970],120018:[759,10,568,39,625],120019:[767,10,880,36,937],120020:[759,10,569,39,627],120021:[807,10,761,52,850],120022:[759,408,664,35,722],120023:[801,22,803,39,861],120024:[789,10,590,73,663],120025:[789,408,692,39,764],120026:[790,10,894,47,952],120027:[758,10,789,36,846],120028:[785,77,966,43,1040],120029:[785,10,852,39,917],120030:[759,10,801,51,858],120031:[767,10,846,47,904],120032:[759,250,801,53,858],120033:[767,10,943,39,975],120034:[767,10,615,60,672],120035:[900,10,555,40,972],120036:[801,10,696,48,753],120037:[819,10,632,49,689],120038:[819,10,987,49,1044],120039:[759,10,685,57,742],120040:[801,408,688,48,745],120041:[767,17,673,43,736],120042:[400,10,606,21,663],120043:[790,10,491,16,549],120044:[400,10,432,20,489],120045:[790,10,606,21,663],120046:[400,10,419,18,476],120047:[790,393,274,-86,332],120048:[400,400,534,-52,592],120049:[790,10,563,-13,620],120050:[649,10,270,34,328],120051:[647,400,228,-312,285],120052:[790,10,489,-3,546],120053:[790,10,291,16,348],120054:[400,10,805,10,862],120055:[400,10,559,10,616],120056:[400,10,497,21,554],120057:[400,400,511,-134,568],120058:[400,399,525,21,582],120059:[424,3,481,38,540],120060:[424,10,426,-29,484],120061:[658,10,299,21,356],120062:[393,10,579,35,636],120063:[423,10,511,35,568],120064:[423,10,688,35,745],120065:[400,10,514,35,571],120066:[393,400,558,-16,615],120067:[408,10,437,-16,494]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"GyrePagellaMathJax_Script"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Script/Regular/Main.js"]);
PypiClean
/D3HRE-1.0.1.tar.gz/D3HRE-1.0.1/opendap_download/multi_processing_download.py
__author__ = "Jan Urbansky" __contributor__ = "Yu Cao" # TODO: Change and describe structure of the links that have to be provided. # TODO: Proper readme with examples. from multiprocessing.dummy import Pool as Threadpool import requests import logging import os import urllib.response from http import cookiejar import urllib.error import urllib.request import re import ruamel.yaml as yaml log = logging.getLogger('opendap_download') class DownloadManager(object): __AUTHENTICATION_URL = 'https://urs.earthdata.nasa.gov/oauth/authorize' __username = '' __password = '' __download_urls = [] __download_path = '' _authenticated_session = None def __init__(self, username='', password='', links=None, download_path='download'): self.set_username_and_password(username, password) self.download_urls = links self.download_path = download_path if logging.getLogger().getEffectiveLevel() == logging.INFO: logging.getLogger("requests").setLevel(logging.CRITICAL) logging.getLogger("urllib3").setLevel(logging.CRITICAL) log.debug('Init DownloadManager') @property def download_urls(self): return self.__download_urls @download_urls.setter def download_urls(self, links): """ Setter for the links to download. The links have to be an array containing the URLs. The module will figure out the filename from the url and save it to the folder provided with download_path() :param links: The links to download :type links: List[str] """ # TODO: Check if links have the right structure? Read filename from links? # Check if all links are formed properly if links is None: self.__download_urls = [] else: for item in links: try: self.get_filename(item) except AttributeError: raise ValueError('The URL seems to not have the right structure: ', item) self.__download_urls = links @property def download_path(self): return self.__download_path @download_path.setter def download_path(self, file_path): self.__download_path = file_path def set_username_and_password(self, username, password): self.__username = username self.__password = password def read_credentials_from_yaml(self, file_path_to_yaml): with open(file_path_to_yaml, 'r') as f: credentials = yaml.load(f) self.set_username_and_password(credentials['username'], credentials['password']) def _mp_download_wrapper(self, url_item): """ Wrapper for parallel download. The function name cannot start with __ due to visibility issues. :param url_item: :type url_item: :return: :rtype: """ query = url_item file_path = os.path.join(self.download_path, self.get_filename(query)) self.__download_and_save_file(query, file_path) def start_download(self, nr_of_threads=4): if self._authenticated_session is None: self._authenticated_session = self.__create_authenticated_sesseion() # Create the download folder. os.makedirs(self.download_path, exist_ok=True) # p = multiprocessing.Pool(nr_of_processes) p = Threadpool(nr_of_threads) p.map(self._mp_download_wrapper, self.download_urls) p.close() p.join() @staticmethod def get_filename(url): """ Extracts the filename from the url. 
This method can also be used to check if the links have the correct structure :param url: The MERRA URL :type url: str :return: The filename :rtype: str """ # This method is changed due to the additional spatial and break day index string = url.rsplit('/',1)[-1] starttime = string.split('[')[1].split(':')[0].zfill(2) endtime = string.split('[')[1].split(':')[1][0:-1].zfill(2) # this file name is illegal on file system reg_exp = r'(?<=/)[^/]*(?=.nc4?)' file_name = re.search(reg_exp, url).group(0) filenamelist = file_name.split('.') filenamelist.insert(3, endtime) filenamelist.insert(3, starttime) name = '.'.join(filenamelist) return name def __download_and_save_file(self, url, file_path): r = self._authenticated_session.get(url, stream=True) with open(file_path, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: f.write(chunk) return r.status_code def __create_authenticated_sesseion(self): s = requests.Session() s.headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36'} s.auth = (self.__username, self.__password) s.cookies = self.__authorize_cookies_with_urllib() if logging.getLogger().getEffectiveLevel() == logging.DEBUG: r = s.get(self.download_urls[0]) log.debug('Authentication Status') log.debug(r.status_code) log.debug(r.headers) log.debug(r.cookies) log.debug('Sessions Data') log.debug(s.cookies) log.debug(s.headers) return s def __authorize_cookies_with_urllib(self): username = self.__username password = self.__password top_level_url = "https://urs.earthdata.nasa.gov" # create an authorization handler p = urllib.request.HTTPPasswordMgrWithDefaultRealm() p.add_password(None, top_level_url, username, password); auth_handler = urllib.request.HTTPBasicAuthHandler(p) auth_cookie_jar = cookiejar.CookieJar() cookie_jar = urllib.request.HTTPCookieProcessor(auth_cookie_jar) opener = urllib.request.build_opener(auth_handler, cookie_jar) urllib.request.install_opener(opener) try: # The merra portal moved the authentication to the download level. Before this change you had to # provide username and password on the overview page. For example: # https://goldsmr4.gesdisc.eosdis.nasa.gov/opendap/MERRA2/ # authentication_url = 'https://goldsmr4.gesdisc.eosdis.nasa.gov/opendap/MERRA2/M2T1NXSLV.5.12.4/1980/01/MERRA2_100.tavg1_2d_slv_Nx.19800101.nc4.ascii?U2M[0:1:1][0:1:1][0:1:1]' # Changes: # Authenticate with the first url in the links. # Request the website and initialiaze the BasicAuth. 
This will populate the auth_cookie_jar authentication_url = self.download_urls[0] result = opener.open(authentication_url) log.debug(list(auth_cookie_jar)) log.debug(list(auth_cookie_jar)[0]) log.debug(list(auth_cookie_jar)[1]) except urllib.error.HTTPError: raise ValueError('Username and or Password are not correct!') except IOError as e: log.warning(e) raise IOError except IndexError as e: log.warning(e) raise IndexError('download_urls is not set') return auth_cookie_jar if __name__ == '__main__': link = [ 'https://goldsmr4.gesdisc.eosdis.nasa.gov/opendap/MERRA2/M2T1NXSLV.5.12.4/2014/01/MERRA2_400.tavg1_2d_slv_Nx.20140101.nc4.nc4?U2M[0:1:5][358:1:360][573:1:575],U10M[0:1:5][358:1:360][573:1:575],U50M[0:1:5][358:1:360][573:1:575],V2M[0:1:5][358:1:360][573:1:575],V10M[0:1:5][358:1:360][573:1:575],V50M[0:1:5][358:1:360][573:1:575]'] logging.basicConfig(level=logging.DEBUG, handlers=[logging.StreamHandler()]) dl = DownloadManager() dl.download_path = 'downlaod123' dl.read_credentials_from_yaml((os.path.join(os.path.dirname(os.path.realpath(__file__)), 'authentication.yaml'))) dl.download_urls = link dl.start_download()
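# Note: the YAML file consumed by read_credentials_from_yaml() only needs the two
# keys that method reads (see the bundled 'authentication.yaml' referenced above).
# A minimal sketch of such a file -- the values are placeholders, not part of this module:
#
#     username: your_earthdata_login
#     password: your_earthdata_password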
PypiClean
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/exports/glances_couchdb.py
import sys from datetime import datetime from glances.logger import logger from glances.exports.glances_export import GlancesExport import couchdb import couchdb.mapping class Export(GlancesExport): """This class manages the CouchDB export module.""" def __init__(self, config=None, args=None): """Init the CouchDB export IF.""" super(Export, self).__init__(config=config, args=args) # Mandatory configuration keys (additional to host and port) self.db = None # Optional configuration keys self.user = None self.password = None # Load the CouchDB configuration file section self.export_enable = self.load_conf('couchdb', mandatories=['host', 'port', 'db'], options=['user', 'password']) if not self.export_enable: sys.exit(2) # Init the CouchDB client self.client = self.init() def init(self): """Init the connection to the CouchDB server.""" if not self.export_enable: return None if self.user is None: server_uri = 'http://{}:{}/'.format(self.host, self.port) else: # Force https if a login/password is provided # Related to https://github.com/nicolargo/glances/issues/2124 server_uri = 'https://{}:{}@{}:{}/'.format(self.user, self.password, self.host, self.port) try: s = couchdb.Server(server_uri) except Exception as e: logger.critical("Cannot connect to CouchDB server %s (%s)" % (server_uri, e)) sys.exit(2) else: logger.info("Connected to the CouchDB server") try: s[self.db] except Exception: # Database did not exist # Create it... s.create(self.db) else: logger.info("There is already a %s database" % self.db) return s def database(self): """Return the CouchDB database object""" return self.client[self.db] def export(self, name, columns, points): """Write the points to the CouchDB server.""" logger.debug("Export {} stats to CouchDB".format(name)) # Create DB input data = dict(zip(columns, points)) # Set the type to the current stat name data['type'] = name data['time'] = couchdb.mapping.DateTimeField()._to_json(datetime.now()) # Write data to the CouchDB database # Result can be seen at: http://127.0.0.1:5984/_utils try: self.client[self.db].save(data) except Exception as e: logger.error("Cannot export {} stats to CouchDB ({})".format(name, e))
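# A hedged sketch of the glances.conf section this exporter reads through
# load_conf() above. The key names mirror the mandatories/options lists; the
# values are placeholders and the INI-style layout is assumed, not confirmed here:
#
#     [couchdb]
#     host=localhost
#     port=5984
#     db=glances
#     user=root
#     password=example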
PypiClean
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/tutorial/solvers.rst
========= Solvers ========= .. >>> init_printing(pretty_print=True, use_unicode=True) This section covers solving equations. .. note:: Any expression in the input that is not an :class:`~diofant.core.relational.Eq` is automatically assumed to be equal to 0 by the solving functions. Algebraic Equations =================== The main function for solving algebraic equations is :func:`~diofant.solvers.solvers.solve`. When solving a single equation, the output is a list of the solutions. >>> solve(x**2 - x) [{x: 0}, {x: 1}] If no solutions are found, an empty list is returned. >>> solve(exp(x)) [] :func:`~diofant.solvers.solvers.solve` can also solve systems of equations. >>> solve([x - y + 2, x + y - 3]) [{x: 1/2, y: 5/2}] >>> solve([x*y - 7, x + y - 6]) ⎡⎧ ___ ___ ⎫ ⎧ ___ ___ ⎫⎤ ⎢⎨x: - ╲╱ 2 + 3, y: ╲╱ 2 + 3⎬, ⎨x: ╲╱ 2 + 3, y: - ╲╱ 2 + 3⎬⎥ ⎣⎩ ⎭ ⎩ ⎭⎦ Each solution is reported only once: >>> solve(x**3 - 6*x**2 + 9*x) [{x: 0}, {x: 3}] To get the solutions of a polynomial including multiplicity, use :func:`~diofant.polys.polyroots.roots`. >>> roots(x**3 - 6*x**2 + 9*x) {0: 1, 3: 2} Recurrence Equations ==================== To solve recurrence equations, use :func:`~diofant.solvers.recurr.rsolve`. First, create an undefined function by passing ``cls=Function`` to the :func:`~diofant.core.symbol.symbols` function. >>> f = symbols('f', cls=Function) We can call ``f(x)``, and it will represent an unknown function application. .. note:: From here on in this tutorial we assume that these statements were executed: >>> from diofant import * >>> a, b, c, d, t, x, y, z = symbols('a:d t x:z') >>> k, m, n = symbols('k m n', integer=True) >>> f, g, h = symbols('f:h', cls=Function) >>> init_printing(pretty_print=True, use_unicode=True) As for algebraic equations, the output is a list of :class:`dict`'s >>> rsolve(f(n + 1) - 3*f(n) - 1) ⎡⎧ n 1⎫⎤ ⎢⎨f: n ↦ 3 ⋅C₀ - ─⎬⎥ ⎣⎩ 2⎭⎦ The arbitrary constants in the solutions are symbols of the form ``C0``, ``C1``, and so on. Differential Equations ====================== To solve the differential equation >>> Eq(f(x).diff(x, x) - 2*f(x).diff(x) + f(x), sin(x)) 2 d d f(x) - 2⋅──(f(x)) + ───(f(x)) = sin(x) dx 2 dx .. note:: Derivatives of the unknown function ``f(x)`` are unevaluated. we would use >>> dsolve(_) x cos(x) f(x) = ℯ ⋅(C₁ + C₂⋅x) + ────── 2 :func:`~diofant.solvers.ode.dsolve` can also solve systems of equations, like :func:`~diofant.solvers.solvers.solve`. >>> dsolve([f(x).diff(x) - g(x), g(x).diff(x) - f(x)]) ⎡ x -x x -x ⎤ ⎣f(x) = ℯ ⋅C₂ - ℯ ⋅C₁, g(x) = ℯ ⋅C₂ + ℯ ⋅C₁⎦
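Since solutions are returned as plain dictionaries, they can be substituted back into the original expression with ``subs`` to verify them. This is a small illustrative sketch, not part of the original tutorial, and the exact output formatting may differ:

>>> [(x**2 - x).subs(s) for s in solve(x**2 - x)]
[0, 0]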
PypiClean
/BotEXBotBase-3.1.3.tar.gz/BotEXBotBase-3.1.3/redbot/core/commands/requires.py
import asyncio import enum from typing import ( Union, Optional, List, Callable, Awaitable, Dict, Any, TYPE_CHECKING, TypeVar, Tuple, ) import discord from .converter import GuildConverter from .errors import BotMissingPermissions if TYPE_CHECKING: from .commands import Command from .context import Context _CommandOrCoro = TypeVar("_CommandOrCoro", Callable[..., Awaitable[Any]], Command) __all__ = [ "CheckPredicate", "DM_PERMS", "GlobalPermissionModel", "GuildPermissionModel", "PermissionModel", "PrivilegeLevel", "PermState", "Requires", "permissions_check", "bot_has_permissions", "has_permissions", "is_owner", "guildowner", "guildowner_or_permissions", "admin", "admin_or_permissions", "mod", "mod_or_permissions", ] _T = TypeVar("_T") GlobalPermissionModel = Union[ discord.User, discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel, discord.Role, GuildConverter, # Unfortunately this will have to do for now ] GuildPermissionModel = Union[ discord.Member, discord.VoiceChannel, discord.TextChannel, discord.CategoryChannel, discord.Role, GuildConverter, ] PermissionModel = Union[GlobalPermissionModel, GuildPermissionModel] CheckPredicate = Callable[["Context"], Union[Optional[bool], Awaitable[Optional[bool]]]] # Here we are trying to model DM permissions as closely as possible. The only # discrepancy I've found is that users can pin messages, but they cannot delete them. # This means manage_messages is only half True, so it's left as False. # This is also the same as the permissions returned when `permissions_for` is used in DM. DM_PERMS = discord.Permissions.none() DM_PERMS.update( add_reactions=True, attach_files=True, embed_links=True, external_emojis=True, mention_everyone=True, read_message_history=True, read_messages=True, send_messages=True, ) class PrivilegeLevel(enum.IntEnum): """Enumeration for special privileges.""" # Maintainer Note: do NOT re-order these. # Each privilege level also implies access to the ones before it. # Inserting new privilege levels at a later point is fine if that is considered. NONE = enum.auto() """No special privilege level.""" MOD = enum.auto() """User has the mod role.""" ADMIN = enum.auto() """User has the admin role.""" GUILD_OWNER = enum.auto() """User is the guild owner.""" BOT_OWNER = enum.auto() """User is a bot owner.""" @classmethod async def from_ctx(cls, ctx: "Context") -> "PrivilegeLevel": """Get a command author's PrivilegeLevel based on context.""" if await ctx.bot.is_owner(ctx.author): return cls.BOT_OWNER elif ctx.guild is None: return cls.NONE elif ctx.author == ctx.guild.owner: return cls.GUILD_OWNER # The following is simply an optimised way to check if the user has the # admin or mod role. guild_settings = ctx.bot.db.guild(ctx.guild) admin_role_id = await guild_settings.admin_role() mod_role_id = await guild_settings.mod_role() is_mod = False for role in ctx.author.roles: if role.id == admin_role_id: return cls.ADMIN elif role.id == mod_role_id: is_mod = True if is_mod: return cls.MOD return cls.NONE def __repr__(self) -> str: return f"<{self.__class__.__name__}.{self.name}>" class PermState(enum.Enum): """Enumeration for permission states used by rules.""" ACTIVE_ALLOW = enum.auto() """This command has been actively allowed, default user checks should be ignored. """ NORMAL = enum.auto() """No overrides have been set for this command, make determination from default user checks.
""" PASSIVE_ALLOW = enum.auto() """There exists a subcommand in the `ACTIVE_ALLOW` state, continue down the subcommand tree until we either find it or realise we're on the wrong branch. """ CAUTIOUS_ALLOW = enum.auto() """This command has been actively denied, but there exists a subcommand in the `ACTIVE_ALLOW` state. This occurs when `PASSIVE_ALLOW` and `ACTIVE_DENY` are combined. """ ACTIVE_DENY = enum.auto() """This command has been actively denied, terminate the command chain. """ # The below are valid states, but should not be transitioned to # They should be set if they apply. ALLOWED_BY_HOOK = enum.auto() """This command has been actively allowed by a permission hook. check validation doesn't need this, but is useful to developers""" DENIED_BY_HOOK = enum.auto() """This command has been actively denied by a permission hook check validation doesn't need this, but is useful to developers""" def transition_to( self, next_state: "PermState" ) -> Tuple[Optional[bool], Union["PermState", Dict[bool, "PermState"]]]: return self.TRANSITIONS[self][next_state] @classmethod def from_bool(cls, value: Optional[bool]) -> "PermState": """Get a PermState from a bool or ``NoneType``.""" if value is True: return cls.ACTIVE_ALLOW elif value is False: return cls.ACTIVE_DENY else: return cls.NORMAL def __repr__(self) -> str: return f"<{self.__class__.__name__}.{self.name}>" # Here we're defining how we transition between states. # The dict is in the form: # previous state -> this state -> Tuple[override, next state] # "override" is a bool describing whether or not the command should be # invoked. It can be None, in which case the default permission checks # will be used instead. # There is also one case where the "next state" is dependent on the # result of the default permission checks - the transition from NORMAL # to PASSIVE_ALLOW. In this case "next state" is a dict mapping the # permission check results to the actual next state. PermState.TRANSITIONS = { PermState.ACTIVE_ALLOW: { PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW), PermState.NORMAL: (True, PermState.ACTIVE_ALLOW), PermState.PASSIVE_ALLOW: (True, PermState.ACTIVE_ALLOW), PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW), PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY), }, PermState.NORMAL: { PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW), PermState.NORMAL: (None, PermState.NORMAL), PermState.PASSIVE_ALLOW: (True, {True: PermState.NORMAL, False: PermState.PASSIVE_ALLOW}), PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW), PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY), }, PermState.PASSIVE_ALLOW: { PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW), PermState.NORMAL: (False, PermState.NORMAL), PermState.PASSIVE_ALLOW: (True, PermState.PASSIVE_ALLOW), PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW), PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY), }, PermState.CAUTIOUS_ALLOW: { PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW), PermState.NORMAL: (False, PermState.ACTIVE_DENY), PermState.PASSIVE_ALLOW: (True, PermState.CAUTIOUS_ALLOW), PermState.CAUTIOUS_ALLOW: (True, PermState.CAUTIOUS_ALLOW), PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY), }, PermState.ACTIVE_DENY: { # We can only start from ACTIVE_DENY if it is set on a cog. 
PermState.ACTIVE_ALLOW: (True, PermState.ACTIVE_ALLOW), # Should never happen PermState.NORMAL: (False, PermState.ACTIVE_DENY), PermState.PASSIVE_ALLOW: (False, PermState.ACTIVE_DENY), # Should never happen PermState.CAUTIOUS_ALLOW: (False, PermState.ACTIVE_DENY), # Should never happen PermState.ACTIVE_DENY: (False, PermState.ACTIVE_DENY), }, } PermState.ALLOWED_STATES = ( PermState.ACTIVE_ALLOW, PermState.PASSIVE_ALLOW, PermState.CAUTIOUS_ALLOW, ) class Requires: """This class describes the requirements for executing a specific command. The permissions described include both bot permissions and user permissions. Attributes ---------- checks : List[Callable[[Context], Union[bool, Awaitable[bool]]]] A list of checks which can be overridden by rules. Use `Command.checks` if you would like them to never be overridden. privilege_level : PrivilegeLevel The required privilege level (bot owner, admin, etc.) for users to execute the command. Can be ``None``, in which case the `user_perms` will be used exclusively, otherwise, for levels other than bot owner, the user can still run the command if they have the required `user_perms`. user_perms : Optional[discord.Permissions] The required permissions for users to execute the command. Can be ``None``, in which case the `privilege_level` will be used exclusively, otherwise, it will pass whether the user has the required `privilege_level` _or_ `user_perms`. bot_perms : discord.Permissions The required bot permissions for a command to be executed. This is not overrideable by other conditions. """ def __init__( self, privilege_level: Optional[PrivilegeLevel], user_perms: Union[Dict[str, bool], discord.Permissions, None], bot_perms: Union[Dict[str, bool], discord.Permissions], checks: List[CheckPredicate], ): self.checks: List[CheckPredicate] = checks self.privilege_level: Optional[PrivilegeLevel] = privilege_level if isinstance(user_perms, dict): self.user_perms: Optional[discord.Permissions] = discord.Permissions.none() _validate_perms_dict(user_perms) self.user_perms.update(**user_perms) else: self.user_perms = user_perms if isinstance(bot_perms, dict): self.bot_perms: discord.Permissions = discord.Permissions.none() _validate_perms_dict(bot_perms) self.bot_perms.update(**bot_perms) else: self.bot_perms = bot_perms self.default_global_rule: PermState = PermState.NORMAL self._global_rules: _IntKeyDict[PermState] = _IntKeyDict() self._default_guild_rules: _IntKeyDict[PermState] = _IntKeyDict() self._guild_rules: _IntKeyDict[_IntKeyDict[PermState]] = _IntKeyDict() @staticmethod def get_decorator( privilege_level: Optional[PrivilegeLevel], user_perms: Dict[str, bool] ) -> Callable[["_CommandOrCoro"], "_CommandOrCoro"]: if not user_perms: user_perms = None def decorator(func: "_CommandOrCoro") -> "_CommandOrCoro": if asyncio.iscoroutinefunction(func): func.__requires_privilege_level__ = privilege_level func.__requires_user_perms__ = user_perms else: func.requires.privilege_level = privilege_level if user_perms is None: func.requires.user_perms = None else: _validate_perms_dict(user_perms) func.requires.user_perms.update(**user_perms) return func return decorator def get_rule(self, model: Union[int, PermissionModel], guild_id: int) -> PermState: """Get the rule for a particular model. Parameters ---------- model : PermissionModel The model to get the rule for. guild_id : int The ID of the guild for the rule's scope. Set to ``0`` for a global rule. Returns ------- PermState The state for this rule. See the `PermState` class for an explanation. 
""" if not isinstance(model, int): model = model.id if guild_id: rules = self._guild_rules.get(guild_id, _IntKeyDict()) else: rules = self._global_rules return rules.get(model, PermState.NORMAL) def set_rule(self, model_id: int, rule: PermState, guild_id: int) -> None: """Set the rule for a particular model. Parameters ---------- model_id : PermissionModel The model to add a rule for. rule : PermState Which state this rule should be set as. See the `PermState` class for an explanation. guild_id : int The ID of the guild for the rule's scope. Set to ``0`` for a global rule. """ if guild_id: rules = self._guild_rules.setdefault(guild_id, _IntKeyDict()) else: rules = self._global_rules if rule is PermState.NORMAL: rules.pop(model_id, None) else: rules[model_id] = rule def clear_all_rules(self, guild_id: int) -> None: """Clear all rules of a particular scope. Parameters ---------- guild_id : int The guild ID to clear rules for. If ``0``, this will clear all global rules and leave all guild rules untouched. """ if guild_id: rules = self._guild_rules.setdefault(guild_id, _IntKeyDict()) else: rules = self._global_rules rules.clear() def get_default_guild_rule(self, guild_id: int) -> PermState: """Get the default rule for a guild.""" return self._default_guild_rules.get(guild_id, PermState.NORMAL) def set_default_guild_rule(self, guild_id: int, rule: PermState) -> None: """Set the default rule for a guild.""" self._default_guild_rules[guild_id] = rule async def verify(self, ctx: "Context") -> bool: """Check if the given context passes the requirements. This will check the bot permissions, overrides, user permissions and privilege level. Parameters ---------- ctx : "Context" The invkokation context to check with. Returns ------- bool ``True`` if the context passes the requirements. Raises ------ BotMissingPermissions If the bot is missing required permissions to run the command. CommandError Propogated from any permissions checks. """ await self._verify_bot(ctx) # Owner should never be locked out of commands for user permissions. if await ctx.bot.is_owner(ctx.author): return True # Owner-only commands are non-overrideable, and we already checked for owner. if self.privilege_level is PrivilegeLevel.BOT_OWNER: return False hook_result = await ctx.bot.verify_permissions_hooks(ctx) if hook_result is not None: return hook_result return await self._transition_state(ctx) async def _verify_bot(self, ctx: "Context") -> None: if ctx.guild is None: bot_user = ctx.bot.user else: bot_user = ctx.guild.me bot_perms = ctx.channel.permissions_for(bot_user) if not (bot_perms.administrator or bot_perms >= self.bot_perms): raise BotMissingPermissions(missing=self._missing_perms(self.bot_perms, bot_perms)) async def _transition_state(self, ctx: "Context") -> bool: prev_state = ctx.permission_state cur_state = self._get_rule_from_ctx(ctx) should_invoke, next_state = prev_state.transition_to(cur_state) if should_invoke is None: # NORMAL invokation, we simply follow standard procedure should_invoke = await self._verify_user(ctx) elif isinstance(next_state, dict): # NORMAL to PASSIVE_ALLOW; should we proceed as normal or transition? # We must check what would happen normally, if no explicit rules were set. 
default_rule = PermState.NORMAL if ctx.guild is not None: default_rule = self.get_default_guild_rule(guild_id=ctx.guild.id) if default_rule is PermState.NORMAL: default_rule = self.default_global_rule if default_rule == PermState.ACTIVE_DENY: would_invoke = False elif default_rule == PermState.ACTIVE_ALLOW: would_invoke = True else: would_invoke = await self._verify_user(ctx) next_state = next_state[would_invoke] ctx.permission_state = next_state return should_invoke async def _verify_user(self, ctx: "Context") -> bool: checks_pass = await self._verify_checks(ctx) if checks_pass is False: return False if self.user_perms is not None: user_perms = ctx.channel.permissions_for(ctx.author) if user_perms.administrator or user_perms >= self.user_perms: return True if self.privilege_level is not None: privilege_level = await PrivilegeLevel.from_ctx(ctx) if privilege_level >= self.privilege_level: return True return False def _get_rule_from_ctx(self, ctx: "Context") -> PermState: author = ctx.author guild = ctx.guild if ctx.guild is None: # We only check the user for DM channels rule = self._global_rules.get(author.id) if rule is not None: return rule return self.default_global_rule rules_chain = [self._global_rules] guild_rules = self._guild_rules.get(ctx.guild.id) if guild_rules: rules_chain.append(guild_rules) channels = [] if author.voice is not None: channels.append(author.voice.channel) channels.append(ctx.channel) category = ctx.channel.category if category is not None: channels.append(category) model_chain = [author, *channels, *author.roles, guild] for rules in rules_chain: for model in model_chain: rule = rules.get(model.id) if rule is not None: return rule del model_chain[-1] # We don't check for the guild in guild rules default_rule = self.get_default_guild_rule(guild.id) if default_rule is PermState.NORMAL: default_rule = self.default_global_rule return default_rule async def _verify_checks(self, ctx: "Context") -> bool: if not self.checks: return True return await discord.utils.async_all(check(ctx) for check in self.checks) @staticmethod def _get_perms_for(ctx: "Context", user: discord.abc.User) -> discord.Permissions: if ctx.guild is None: return DM_PERMS else: return ctx.channel.permissions_for(user) @classmethod def _get_bot_perms(cls, ctx: "Context") -> discord.Permissions: return cls._get_perms_for(ctx, ctx.guild.me if ctx.guild else ctx.bot.user) @staticmethod def _missing_perms( required: discord.Permissions, actual: discord.Permissions ) -> discord.Permissions: # Explained in set theory terms: # Assuming R is the set of required permissions, and A is # the set of the user's permissions, the set of missing # permissions will be equal to R \ A, i.e. the relative # complement/difference of A with respect to R. relative_complement = required.value & ~actual.value return discord.Permissions(relative_complement) @staticmethod def _member_as_user(member: discord.abc.User) -> discord.User: if isinstance(member, discord.Member): # noinspection PyProtectedMember return member._user return member def __repr__(self) -> str: return ( f"<Requires privilege_level={self.privilege_level!r} user_perms={self.user_perms!r} " f"bot_perms={self.bot_perms!r}>" ) # check decorators def permissions_check(predicate: CheckPredicate): """An overwriteable version of `discord.ext.commands.check`. This has the same behaviour as `discord.ext.commands.check`, however this check can be ignored if the command is allowed through a permissions cog. 
""" def decorator(func: "_CommandOrCoro") -> "_CommandOrCoro": if hasattr(func, "requires"): func.requires.checks.append(predicate) else: if not hasattr(func, "__requires_checks__"): func.__requires_checks__ = [] # noinspection PyUnresolvedReferences func.__requires_checks__.append(predicate) return func return decorator def bot_has_permissions(**perms: bool): """Complain if the bot is missing permissions. If the user tries to run the command, but the bot is missing the permissions, it will send a message describing which permissions are missing. This check cannot be overridden by rules. """ def decorator(func: "_CommandOrCoro") -> "_CommandOrCoro": if asyncio.iscoroutinefunction(func): func.__requires_bot_perms__ = perms else: _validate_perms_dict(perms) func.requires.bot_perms.update(**perms) return func return decorator def has_permissions(**perms: bool): """Restrict the command to users with these permissions. This check can be overridden by rules. """ if perms is None: raise TypeError("Must provide at least one keyword argument to has_permissions") return Requires.get_decorator(None, perms) def is_owner(): """Restrict the command to bot owners. This check cannot be overridden by rules. """ return Requires.get_decorator(PrivilegeLevel.BOT_OWNER, {}) def guildowner_or_permissions(**perms: bool): """Restrict the command to the guild owner or users with these permissions. This check can be overridden by rules. """ return Requires.get_decorator(PrivilegeLevel.GUILD_OWNER, perms) def guildowner(): """Restrict the command to the guild owner. This check can be overridden by rules. """ return guildowner_or_permissions() def admin_or_permissions(**perms: bool): """Restrict the command to users with the admin role or these permissions. This check can be overridden by rules. """ return Requires.get_decorator(PrivilegeLevel.ADMIN, perms) def admin(): """Restrict the command to users with the admin role. This check can be overridden by rules. """ return admin_or_permissions() def mod_or_permissions(**perms: bool): """Restrict the command to users with the mod role or these permissions. This check can be overridden by rules. """ return Requires.get_decorator(PrivilegeLevel.MOD, perms) def mod(): """Restrict the command to users with the mod role. This check can be overridden by rules. """ return mod_or_permissions() class _IntKeyDict(Dict[int, _T]): """Dict subclass which throws KeyError when a non-int key is used.""" def __getitem__(self, key: Any) -> _T: if not isinstance(key, int): raise TypeError("Keys must be of type `int`") return super().__getitem__(key) def __setitem__(self, key: Any, value: _T) -> None: if not isinstance(key, int): raise TypeError("Keys must be of type `int`") return super().__setitem__(key, value) def _validate_perms_dict(perms: Dict[str, bool]) -> None: for perm, value in perms.items(): try: attr = getattr(discord.Permissions, perm) except AttributeError: attr = None if attr is None or not isinstance(attr, property): # We reject invalid permissions raise TypeError(f"Unknown permission name '{perm}'") if value is not True: # We reject any permission not specified as 'True', since this is the only value which # makes practical sense. raise TypeError(f"Permission {perm} may only be specified as 'True', not {value}")
PypiClean
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/specialchar/dialogs/lang/ku.js
/* Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */ CKEDITOR.plugins.setLang("specialchar","ku",{euro:"نیشانەی یۆرۆ",lsquo:"نیشانەی فاریزەی سەرووژێری تاکی چەپ",rsquo:"نیشانەی فاریزەی سەرووژێری تاکی ڕاست",ldquo:"نیشانەی فاریزەی سەرووژێری دووهێندەی چه‌پ",rdquo:"نیشانەی فاریزەی سەرووژێری دووهێندەی ڕاست",ndash:"تەقەڵی کورت",mdash:"تەقەڵی درێژ",iexcl:"نیشانەی هەڵەوگێڕی سەرسوڕهێنەر",cent:"نیشانەی سەنت",pound:"نیشانەی پاوەند",curren:"نیشانەی دراو",yen:"نیشانەی یەنی ژاپۆنی",brvbar:"شریتی ئەستوونی پچڕاو",sect:"نیشانەی دوو s لەسەریەک",uml:"خاڵ",copy:"نیشانەی مافی چاپ", ordf:"هێڵ لەسەر پیتی a",laquo:"دوو تیری بەدووایەکی چەپ",not:"نیشانەی نەخێر",reg:"نیشانەی R لەناو بازنەدا",macr:"ماکڕۆن",deg:"نیشانەی پلە",sup2:"سەرنووسی دوو",sup3:"سەرنووسی سێ",acute:"لاری تیژ",micro:"نیشانەی u لق درێژی چەپی خواروو",para:"نیشانەی پەڕەگراف",middot:"ناوەڕاستی خاڵ",cedil:"نیشانەی c ژێر چووکرە",sup1:"سەرنووسی یەک",ordm:"هێڵ لەژێر پیتی o",raquo:"دوو تیری بەدووایەکی ڕاست",frac14:"یەک لەسەر چووار",frac12:"یەک لەسەر دوو",frac34:"سێ لەسەر چووار",iquest:"هێمای هەڵەوگێری پرسیار",Agrave:"پیتی لاتینی A-ی گەورە لەگەڵ ڕوومەتداری لار", Aacute:"پیتی لاتینی A-ی گەورە لەگەڵ ڕوومەتداری تیژ",Acirc:"پیتی لاتینی A-ی گەورە لەگەڵ نیشانە لەسەری",Atilde:"پیتی لاتینی A-ی گەورە لەگەڵ زەڕە",Auml:"پیتی لاتینی A-ی گەورە لەگەڵ نیشانە لەسەری",Aring:"پیتی لاتینی گەورەی Å",AElig:"پیتی لاتینی گەورەی Æ",Ccedil:"پیتی لاتینی C-ی گەورە لەگەڵ ژێر چووکرە",Egrave:"پیتی لاتینی E-ی گەورە لەگەڵ ڕوومەتداری لار",Eacute:"پیتی لاتینی E-ی گەورە لەگەڵ ڕوومەتداری تیژ",Ecirc:"پیتی لاتینی E-ی گەورە لەگەڵ نیشانە لەسەری",Euml:"پیتی لاتینی E-ی گەورە لەگەڵ نیشانە لەسەری", Igrave:"پیتی لاتینی I-ی گەورە لەگەڵ ڕوومەتداری لار",Iacute:"پیتی لاتینی I-ی گەورە لەگەڵ ڕوومەتداری تیژ",Icirc:"پیتی لاتینی I-ی گەورە لەگەڵ نیشانە لەسەری",Iuml:"پیتی لاتینی I-ی گەورە لەگەڵ نیشانە لەسەری",ETH:"پیتی لاتینی E-ی گەورەی",Ntilde:"پیتی لاتینی N-ی گەورە لەگەڵ زەڕە",Ograve:"پیتی لاتینی O-ی گەورە لەگەڵ ڕوومەتداری لار",Oacute:"پیتی لاتینی O-ی گەورە لەگەڵ ڕوومەتداری تیژ",Ocirc:"پیتی لاتینی O-ی گەورە لەگەڵ نیشانە لەسەری",Otilde:"پیتی لاتینی O-ی گەورە لەگەڵ زەڕە",Ouml:"پیتی لاتینی O-ی گەورە لەگەڵ نیشانە لەسەری", times:"نیشانەی لێکدان",Oslash:"پیتی لاتینی گەورەی Ø لەگەڵ هێمای دڵ وەستان",Ugrave:"پیتی لاتینی U-ی گەورە لەگەڵ ڕوومەتداری لار",Uacute:"پیتی لاتینی U-ی گەورە لەگەڵ ڕوومەتداری تیژ",Ucirc:"پیتی لاتینی U-ی گەورە لەگەڵ نیشانە لەسەری",Uuml:"پیتی لاتینی U-ی گەورە لەگەڵ نیشانە لەسەری",Yacute:"پیتی لاتینی Y-ی گەورە لەگەڵ ڕوومەتداری تیژ",THORN:"پیتی لاتینی دڕکی گەورە",szlig:"پیتی لاتنی نووک تیژی s",agrave:"پیتی لاتینی a-ی بچووک لەگەڵ ڕوومەتداری لار",aacute:"پیتی لاتینی a-ی بچووك لەگەڵ ڕوومەتداری تیژ",acirc:"پیتی لاتینی a-ی بچووك لەگەڵ نیشانە لەسەری", atilde:"پیتی لاتینی a-ی بچووك لەگەڵ زەڕە",auml:"پیتی لاتینی a-ی بچووك لەگەڵ نیشانە لەسەری",aring:"پیتی لاتینی å-ی بچووك",aelig:"پیتی لاتینی æ-ی بچووك",ccedil:"پیتی لاتینی c-ی بچووك لەگەڵ ژێر چووکرە",egrave:"پیتی لاتینی e-ی بچووك لەگەڵ ڕوومەتداری لار",eacute:"پیتی لاتینی e-ی بچووك لەگەڵ ڕوومەتداری تیژ",ecirc:"پیتی لاتینی e-ی بچووك لەگەڵ نیشانە لەسەری",euml:"پیتی لاتینی e-ی بچووك لەگەڵ نیشانە لەسەری",igrave:"پیتی لاتینی i-ی بچووك لەگەڵ ڕوومەتداری لار",iacute:"پیتی لاتینی i-ی بچووك لەگەڵ ڕوومەتداری تیژ", icirc:"پیتی لاتینی i-ی بچووك لەگەڵ نیشانە لەسەری",iuml:"پیتی لاتینی i-ی بچووك لەگەڵ نیشانە لەسەری",eth:"پیتی لاتینی e-ی بچووك",ntilde:"پیتی لاتینی n-ی بچووك لەگەڵ زەڕە",ograve:"پیتی لاتینی o-ی بچووك لەگەڵ ڕوومەتداری لار",oacute:"پیتی لاتینی o-ی بچووك له‌گەڵ ڕوومەتداری 
تیژ",ocirc:"پیتی لاتینی o-ی بچووك لەگەڵ نیشانە لەسەری",otilde:"پیتی لاتینی o-ی بچووك لەگەڵ زەڕە",ouml:"پیتی لاتینی o-ی بچووك لەگەڵ نیشانە لەسەری",divide:"نیشانەی دابەش",oslash:"پیتی لاتینی گەورەی ø لەگەڵ هێمای دڵ وەستان",ugrave:"پیتی لاتینی u-ی بچووك لەگەڵ ڕوومەتداری لار", uacute:"پیتی لاتینی u-ی بچووك لەگەڵ ڕوومەتداری تیژ",ucirc:"پیتی لاتینی u-ی بچووك لەگەڵ نیشانە لەسەری",uuml:"پیتی لاتینی u-ی بچووك لەگەڵ نیشانە لەسەری",yacute:"پیتی لاتینی y-ی بچووك لەگەڵ ڕوومەتداری تیژ",thorn:"پیتی لاتینی دڕکی بچووك",yuml:"پیتی لاتینی y-ی بچووك لەگەڵ نیشانە لەسەری",OElig:"پیتی لاتینی گەورەی پێکەوەنووسراوی OE",oelig:"پیتی لاتینی بچووکی پێکەوەنووسراوی oe",372:"پیتی لاتینی W-ی گەورە لەگەڵ نیشانە لەسەری",374:"پیتی لاتینی Y-ی گەورە لەگەڵ نیشانە لەسەری",373:"پیتی لاتینی w-ی بچووکی لەگەڵ نیشانە لەسەری", 375:"پیتی لاتینی y-ی بچووکی لەگەڵ نیشانە لەسەری",sbquo:"نیشانەی فاریزەی نزم",8219:"نیشانەی فاریزەی بەرزی پێچەوانە",bdquo:"دوو فاریزەی تەنیش یەك",hellip:"ئاسۆیی بازنە",trade:"نیشانەی بازرگانی",9658:"ئاراستەی ڕەشی دەستی ڕاست",bull:"فیشەك",rarr:"تیری دەستی ڕاست",rArr:"دووتیری دەستی ڕاست",hArr:"دوو تیری ڕاست و چەپ",diams:"ڕەشی پاقڵاوەیی",asymp:"نیشانەی یەکسانە"});
PypiClean
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/cmorizers/data/formatters/nsidc_common.py
import glob import logging import os import iris import numpy as np from iris.coords import AuxCoord from iris.cube import Cube from esmvaltool.cmorizers.data.utilities import ( fix_var_metadata, save_variable, set_global_atts, ) logger = logging.getLogger(__name__) def cmorize(cfg, region, in_dir, out_dir): """Cmorize NSIDC-0116 dataset.""" glob_attrs = cfg['attributes'] logger.info("Starting cmorization for Tier%s OBS files: %s", glob_attrs['tier'], glob_attrs['dataset_id']) logger.info("Input data from: %s", in_dir) logger.info("Output will be written to: %s", out_dir) file_expr = os.path.join(in_dir, f'icemotion_daily_{region}_*.nc') for filepath in glob.glob(file_expr): logger.info('Cmorizing file %s', filepath) cubes = iris.load(filepath) logger.debug(cubes) lat_coord = _create_coord(cubes, 'lat', 'latitude') lon_coord = _create_coord(cubes, 'lon', 'longitude') lon_coord.points[lon_coord.points < 0] += 360 for var, vals in cfg['variables'].items(): var_info = cfg['cmor_table'].get_variable(vals['mip'], var) logger.info('Cmorizing var %s', var) cube = cubes.extract_cube(iris.Constraint(vals['raw'])) cube.add_aux_coord(lat_coord, (1, 2)) cube.add_aux_coord(lon_coord, (1, 2)) cube.convert_units(var_info.units) logger.debug(cube) glob_attrs['mip'] = vals['mip'] fix_var_metadata(cube, var_info) set_global_atts(cube, glob_attrs) zlib = vals.get('compress', False) if zlib: # Realize data to speed-up writing # pylint: disable=pointless-statement cube.data save_variable(cube, var, out_dir, glob_attrs, zlib=zlib) cubes.remove(cube) _create_areacello(cfg, cube, glob_attrs, out_dir) def _create_areacello(cfg, sample_cube, glob_attrs, out_dir): if not cfg['custom'].get('create_areacello', False): return var_info = cfg['cmor_table'].get_variable('fx', 'areacello') glob_attrs['mip'] = 'fx' lat_coord = sample_cube.coord('latitude') cube = Cube( np.full(lat_coord.shape, cfg['custom']['grid_cell_size'], np.float32), standard_name=var_info.standard_name, long_name=var_info.long_name, var_name=var_info.short_name, units='m2', ) cube.add_aux_coord(lat_coord, (0, 1)) cube.add_aux_coord(sample_cube.coord('longitude'), (0, 1)) cube.add_dim_coord(sample_cube.coord('projection_y_coordinate'), 0) cube.add_dim_coord(sample_cube.coord('projection_x_coordinate'), 1) fix_var_metadata(cube, var_info) set_global_atts(cube, glob_attrs) save_variable(cube, var_info.short_name, out_dir, glob_attrs, zlib=True) def _create_coord(cubes, var_name, standard_name): cube = cubes.extract_cube(standard_name) coord = AuxCoord( cube.data, standard_name=standard_name, long_name=cube.long_name, var_name=var_name, units=cube.units, ) return coord
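# A rough sketch of the cfg mapping this formatter expects, inferred from the
# keys accessed above (placeholders only; the concrete values come from the
# cmorizer configuration and are not defined in this module):
#
#     cfg = {
#         'attributes': {'tier': ..., 'dataset_id': ..., ...},
#         'cmor_table': <CMOR table object>,
#         'custom': {'create_areacello': True, 'grid_cell_size': ...},
#         'variables': {'<short_name>': {'raw': '<raw_name>', 'mip': '<mip>', 'compress': True}},
#     }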
PypiClean
/HTSQL-2.3.3.tar.gz/HTSQL-2.3.3/src/htsql/tweak/shell/vendor/codemirror-2.13/mode/css/css.js
CodeMirror.defineMode("css", function(config) { var indentUnit = config.indentUnit, type; function ret(style, tp) {type = tp; return style;} function tokenBase(stream, state) { var ch = stream.next(); if (ch == "@") {stream.eatWhile(/\w/); return ret("meta", stream.current());} else if (ch == "/" && stream.eat("*")) { state.tokenize = tokenCComment; return tokenCComment(stream, state); } else if (ch == "<" && stream.eat("!")) { state.tokenize = tokenSGMLComment; return tokenSGMLComment(stream, state); } else if (ch == "=") ret(null, "compare"); else if ((ch == "~" || ch == "|") && stream.eat("=")) return ret(null, "compare"); else if (ch == "\"" || ch == "'") { state.tokenize = tokenString(ch); return state.tokenize(stream, state); } else if (ch == "#") { stream.eatWhile(/\w/); return ret("atom", "hash"); } else if (ch == "!") { stream.match(/^\s*\w*/); return ret("keyword", "important"); } else if (/\d/.test(ch)) { stream.eatWhile(/[\w.%]/); return ret("number", "unit"); } else if (/[,.+>*\/]/.test(ch)) { return ret(null, "select-op"); } else if (/[;{}:\[\]]/.test(ch)) { return ret(null, ch); } else { stream.eatWhile(/[\w\\\-_]/); return ret("variable", "variable"); } } function tokenCComment(stream, state) { var maybeEnd = false, ch; while ((ch = stream.next()) != null) { if (maybeEnd && ch == "/") { state.tokenize = tokenBase; break; } maybeEnd = (ch == "*"); } return ret("comment", "comment"); } function tokenSGMLComment(stream, state) { var dashes = 0, ch; while ((ch = stream.next()) != null) { if (dashes >= 2 && ch == ">") { state.tokenize = tokenBase; break; } dashes = (ch == "-") ? dashes + 1 : 0; } return ret("comment", "comment"); } function tokenString(quote) { return function(stream, state) { var escaped = false, ch; while ((ch = stream.next()) != null) { if (ch == quote && !escaped) break; escaped = !escaped && ch == "\\"; } if (!escaped) state.tokenize = tokenBase; return ret("string", "string"); }; } return { startState: function(base) { return {tokenize: tokenBase, baseIndent: base || 0, stack: []}; }, token: function(stream, state) { if (stream.eatSpace()) return null; var style = state.tokenize(stream, state); var context = state.stack[state.stack.length-1]; if (type == "hash" && context == "rule") style = "atom"; else if (style == "variable") { if (context == "rule") style = "number"; else if (!context || context == "@media{") style = "tag"; } if (context == "rule" && /^[\{\};]$/.test(type)) state.stack.pop(); if (type == "{") { if (context == "@media") state.stack[state.stack.length-1] = "@media{"; else state.stack.push("{"); } else if (type == "}") state.stack.pop(); else if (type == "@media") state.stack.push("@media"); else if (context == "{" && type != "comment") state.stack.push("rule"); return style; }, indent: function(state, textAfter) { var n = state.stack.length; if (/^\}/.test(textAfter)) n -= state.stack[state.stack.length-1] == "rule" ? 2 : 1; return state.baseIndent + n * indentUnit; }, electricChars: "}" }; }); CodeMirror.defineMIME("text/css", "css");
PypiClean
/K40Silence-0.0.1.tar.gz/K40Silence-0.0.1/src/gui/silence.py
import os import threading import wx from src.gui.icons import (icon_CC, icon_down, icon_left, icon_LL, icon_LR, icon_right, icon_UL, icon_up, icon_UR) from src.gui.mwindow import MWindow from src.kernel import Job from .laserrender import ( DRAW_MODE_FLIPXY, DRAW_MODE_INVERT, LaserRender, DRAW_MODE_GCODE, DRAW_MODE_CUT, DRAW_MODE_RASTER, DRAW_MODE_ENGRAVE, DRAW_MODE_ESTIMATE, DRAW_MODE_ZOOM, DRAW_MODE_REFRESH, DRAW_MODE_ANIMATE, ) from .widget import ( GridWidget, GuideWidget, RasterImageWidget, VectorEngraveWidget, VectorCutWidget, TimeEstimateWidget, GCodePathsWidget, ReticleWidget, ) MILS_IN_MM = 39.3701 _ = wx.GetTranslation supported_languages = ( ("en", u"English", wx.LANGUAGE_ENGLISH), ("it", u"italiano", wx.LANGUAGE_ITALIAN), ("fr", u"français", wx.LANGUAGE_FRENCH), ("de", u"Deutsch", wx.LANGUAGE_GERMAN), ("es", u"español", wx.LANGUAGE_SPANISH), ("zh", u"Chinese", wx.LANGUAGE_CHINESE), ) class Silence(MWindow, Job): def __init__(self, *args, **kwds): super().__init__(815, 624, *args, **kwds) Job.__init__(self, job_name="refresh_scene", process=self.refresh_scene, interval=0.05) self.context.setting(int, "draw_mode", 0xFF) self.context.setting(float, "units_convert", MILS_IN_MM) self.context.setting(str, "units_name", "mm") self.context.setting(int, "units_marks", 10) self.context.setting(int, "units_index", 0) self.context.setting(str, "board", "M2") self.context.setting(int, "offset_x", 0) self.context.setting(int, "offset_y", 0) self.context.setting(int, "offset_width", 0) self.context.setting(int, "offset_height", 0) self._Buffer = None self.widget_scene = self.root_context.open("module/Scene") self.screen_refresh_is_requested = False self.screen_refresh_is_running = False self.screen_refresh_lock = threading.Lock() self.background_brush = wx.Brush("Grey") # Menu Bar self.silence_menubar = wx.MenuBar() wxglade_tmp_menu = wx.Menu() self.silence_menubar.settings_save = wxglade_tmp_menu.Append( wx.ID_ANY, "Save Setting File", "Opens a dialog box to save a file to write the current settings to. The settings file will contain all of the settings as they are when the settings file is saved (including the name and location of the active design file.)", ) self.Bind( wx.EVT_MENU, self.on_menu_save, id=self.silence_menubar.settings_save.GetId(), ) self.silence_menubar.settings_load = wxglade_tmp_menu.Append( wx.ID_ANY, "Load Setting File", "Opens a dialog box to select a file to read the settings from. A settings file previously saved can be opens to recover all of the settings as they were when the settings file was saved (including the name and location of the active design file at the time of the save.)", ) self.Bind( wx.EVT_MENU, self.on_menu_load, id=self.silence_menubar.settings_load.GetId(), ) wxglade_tmp_menu.AppendSeparator() self.silence_menubar.design_open = wxglade_tmp_menu.Append( wx.ID_ANY, "Open Design", "Opens a dialog box to select a design file to open.", ) self.Bind( wx.EVT_MENU, self.on_menu_open, id=self.silence_menubar.design_open.GetId() ) self.silence_menubar.design_reload = wxglade_tmp_menu.Append( wx.ID_ANY, "Reload Design", "Reloads the current design. (Re-read the design file information from the storage location)", ) self.Bind( wx.EVT_MENU, self.on_menu_reload, id=self.silence_menubar.design_reload.GetId(), ) wxglade_tmp_menu.AppendSeparator() self.silence_menubar.egv_send = wxglade_tmp_menu.Append( wx.ID_ANY, "Send EGV File To Laser", "Open a file open dialog to select an EGV file to send to the laser controller. 
The raw data is not interpreted, so there is no preview of the data to be sent to the laser.", ) self.Bind( wx.EVT_MENU, self.on_menu_egv, id=self.silence_menubar.egv_send.GetId() ) self.silence_menubar.egv_sav = wxglade_tmp_menu.Append( wx.ID_ANY, "Save EGV File", "Save the Raster Engrave data to an EGV file. The file will contain the data that would be sent to the laser if the Raster Engrave button was pressed.", ) self.Bind( wx.EVT_MENU, self.on_menu_egv_save, id=self.silence_menubar.egv_sav.GetId() ) self.silence_menubar.exit = wxglade_tmp_menu.Append( wx.ID_ANY, "Exit", "Exit the program. A dialog box will open to confirm." ) self.Bind(wx.EVT_MENU, self.on_menu_exit, id=self.silence_menubar.exit.GetId()) self.silence_menubar.Append(wxglade_tmp_menu, "File") wxglade_tmp_menu = wx.Menu() self.silence_menubar.scene_refresh = wxglade_tmp_menu.Append( wx.ID_ANY, "Refresh\tF5", "Refreshes items displayed in the main window." ) self.Bind( wx.EVT_MENU, self.on_menu_refresh, id=self.silence_menubar.scene_refresh.GetId(), ) wxglade_tmp_menu.AppendSeparator() self.silence_menubar.view_raster = wxglade_tmp_menu.Append( wx.ID_ANY, "Show Raster Image", "Toggle the Raster image display on and off. If the display is turned of the data still exists and the Raster Engrave button will still function. The data is just not displayed which may help make the program run faster.", wx.ITEM_CHECK, ) self.Bind( wx.EVT_MENU, self.on_menu_view_raster, id=self.silence_menubar.view_raster.GetId(), ) self.silence_menubar.view_engrave = wxglade_tmp_menu.Append( wx.ID_ANY, "Show Vector Engrave", "Toggle the Vector Engrave line display on and off. If the display is turned of the data still exists and the Vector Engrave button will still function. The data is just not displayed which may help make the program run faster.", wx.ITEM_CHECK, ) self.Bind( wx.EVT_MENU, self.on_menu_view_engrave, id=self.silence_menubar.view_engrave.GetId(), ) self.silence_menubar.view_cut = wxglade_tmp_menu.Append( wx.ID_ANY, "Show Vector Cut", "Toggle the Vector Cut line display on and off. If the display is turned of the data still exists and the Vector Cut button will still function. The data is just not displayed which may help make the program run faster.", wx.ITEM_CHECK, ) self.Bind( wx.EVT_MENU, self.on_menu_view_cut, id=self.silence_menubar.view_cut.GetId() ) self.silence_menubar.view_gcode = wxglade_tmp_menu.Append( wx.ID_ANY, "Show GCode Paths", "Toggle the G-Code line display on and off. If the display is turned of the data still exists and the G-Code Run button will still function. The data is just not displayed which may help make the program run faster.", wx.ITEM_CHECK, ) self.Bind( wx.EVT_MENU, self.on_menu_view_gcode, id=self.silence_menubar.view_gcode.GetId(), ) wxglade_tmp_menu.AppendSeparator() self.silence_menubar.view_estimate = wxglade_tmp_menu.Append( wx.ID_ANY, "Show Time Estimate", "Toggle the display of time estimates on and off. When one the time estimates for the various operations will be displayed in the lower right corner of the main window. The estimates should be considered rough estimates. The Vector Cut and Vector Engrave estimates are better than the Raster Engrave estimate. 
The raster engrave estimate assumes the whole page area is raster engraved so it is generally an upper bound on the time required to raster engrave a given design.", wx.ITEM_CHECK, ) self.Bind( wx.EVT_MENU, self.on_menu_view_estimate, id=self.silence_menubar.view_estimate.GetId(), ) self.silence_menubar.view_zoom = wxglade_tmp_menu.Append( wx.ID_ANY, "Zoom to Design Size", "Zoom to the input design size rather than the working area of the laser. This allows for closer inspection of the input design. This is especially useful for small designs.", wx.ITEM_CHECK, ) self.Bind( wx.EVT_MENU, self.on_menu_view_zoom, id=self.silence_menubar.view_zoom.GetId(), ) self.silence_menubar.Append(wxglade_tmp_menu, "View") wxglade_tmp_menu = wx.Menu() self.silence_menubar.tool_raster = wxglade_tmp_menu.Append( wx.ID_ANY, "Calculate Raster Time", "Calculate the time needed to perform raster", ) self.Bind( wx.EVT_MENU, self.on_menu_calc_raster, id=self.silence_menubar.tool_raster.GetId(), ) self.silence_menubar.tool_hull = wxglade_tmp_menu.Append( wx.ID_ANY, "Trace Design Boundary\tCtrl-T", "Trace Design" ) self.Bind( wx.EVT_MENU, self.on_menu_convex_hull, id=self.silence_menubar.tool_hull.GetId(), ) wxglade_tmp_menu.AppendSeparator() self.silence_menubar.usb_init = wxglade_tmp_menu.Append( wx.ID_ANY, "Initialize Laser\tCtrl-i", 'Establish connection with the laser controller board, and optionally Home the laser depending on the setting in the General settings window for "Home Upon initialize"', ) self.Bind( wx.EVT_MENU, self.on_menu_initialize, id=self.silence_menubar.usb_init.GetId(), ) wxglade_tmp_menu_sub = wx.Menu() self.silence_menubar.usb_reset = wxglade_tmp_menu_sub.Append( wx.ID_ANY, "Reset USB", "Reset the USB port." ) self.Bind( wx.EVT_MENU, self.on_menu_reset_usb, id=self.silence_menubar.usb_reset.GetId(), ) self.silence_menubar.usb_release = wxglade_tmp_menu_sub.Append( wx.ID_ANY, "Release USB", "Disconnect the laser controller board." ) self.Bind( wx.EVT_MENU, self.on_menu_release_usb, id=self.silence_menubar.usb_release.GetId(), ) wxglade_tmp_menu.Append(wx.ID_ANY, "USB", wxglade_tmp_menu_sub, "") self.silence_menubar.Append(wxglade_tmp_menu, "Tools") wxglade_tmp_menu = wx.Menu() self.silence_menubar.settings_general = wxglade_tmp_menu.Append( wx.ID_ANY, "General Settings\tF2", "Opens the General Settings window." ) self.Bind( wx.EVT_MENU, self.on_menu_settings_general, id=self.silence_menubar.settings_general.GetId(), ) self.silence_menubar.settings_raster = wxglade_tmp_menu.Append( wx.ID_ANY, "Raster Settings\tF3", "Opens the Raster Settings Window" ) self.Bind( wx.EVT_MENU, self.on_menu_settings_raster, id=self.silence_menubar.settings_raster.GetId(), ) self.silence_menubar.settings_rotary = wxglade_tmp_menu.Append( wx.ID_ANY, "Rotary Settings\tF4", "Shows (or hides) the Advanced Settings in the main window.", ) self.Bind( wx.EVT_MENU, self.on_menu_settings_rotary, id=self.silence_menubar.settings_rotary.GetId(), ) wxglade_tmp_menu.AppendSeparator() self.silence_menubar.settings_advanced = wxglade_tmp_menu.Append( wx.ID_ANY, "Advanced Settings\tF6", "Toggle advanced settings" ) self.Bind( wx.EVT_MENU, self.on_menu_settings_advanced, id=self.silence_menubar.settings_advanced.GetId(), ) self.silence_menubar.Append(wxglade_tmp_menu, "Setting") wxglade_tmp_menu = wx.Menu() self.silence_menubar.help_about = wxglade_tmp_menu.Append( wx.ID_ANY, "About", "Open a window that identifies the program." 
) self.Bind( wx.EVT_MENU, self.on_menu_help_about, id=self.silence_menubar.help_about.GetId(), ) self.silence_menubar.help_webpage = wxglade_tmp_menu.Append( wx.ID_ANY, "Webpage", "Opens a new browser window with the web page." ) self.Bind( wx.EVT_MENU, self.on_menu_help_webpage, id=self.silence_menubar.help_webpage.GetId(), ) self.silence_menubar.help_manual = wxglade_tmp_menu.Append( wx.ID_ANY, "Manual", "Opens a new browser window to the manual web page." ) self.Bind( wx.EVT_MENU, self.on_menu_help_manual, id=self.silence_menubar.help_manual.GetId(), ) self.silence_menubar.Append(wxglade_tmp_menu, "Help") self.SetMenuBar(self.silence_menubar) # Menu Bar end self.silence_statusbar = self.CreateStatusBar(1) self.panel_6 = wx.Panel(self, wx.ID_ANY) self.button_usb_init = wx.Button( self.panel_6, wx.ID_ANY, "Initialize Laser Cutter" ) self.button_design_open = wx.Button( self.panel_6, wx.ID_ANY, "Open\nDesign File" ) self.button_design_reload = wx.Button( self.panel_6, wx.ID_ANY, "Reload\nDesign File" ) self.panel_1 = wx.Panel(self.panel_6, wx.ID_ANY) self.button_home = wx.Button(self.panel_1, wx.ID_ANY, "Home") self.button_unlock_rail = wx.Button(self.panel_1, wx.ID_ANY, "Unlock Rail") self.text_jog_step = wx.TextCtrl(self.panel_1, wx.ID_ANY, "10.0") self.panel_2 = wx.Panel(self.panel_6, wx.ID_ANY) self.button_align_TL = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_UL.GetBitmap(resize=(40, 40)) ) self.button_jog_T = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_up.GetBitmap(resize=(40, 40)) ) self.button_align_TR = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_UR.GetBitmap(resize=(40, 40)) ) self.button_jog_L = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_left.GetBitmap(resize=(40, 40)) ) self.button_align_C = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_CC.GetBitmap(resize=(40, 40)) ) self.button_jog_R = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_right.GetBitmap(resize=(40, 40)) ) self.button_align_BL = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_LL.GetBitmap(resize=(40, 40)) ) self.button_jog_B = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_down.GetBitmap(resize=(40, 40)) ) self.button_align_BR = wx.BitmapButton( self.panel_2, wx.ID_ANY, icon_LR.GetBitmap(resize=(40, 40)) ) self.panel_4 = wx.Panel(self.panel_2, wx.ID_ANY) self.button_move = wx.Button(self.panel_4, wx.ID_ANY, "Move To") self.text_move_x = wx.TextCtrl( self.panel_4, wx.ID_ANY, "0.0", style=wx.TE_CENTRE ) self.text_move_y = wx.TextCtrl( self.panel_4, wx.ID_ANY, "0.0", style=wx.TE_CENTRE ) self.panel_3 = wx.Panel(self.panel_6, wx.ID_ANY) self.button_raster = wx.Button(self.panel_3, wx.ID_ANY, "Raster Engrave") self.text_raster_speed = wx.TextCtrl( self.panel_3, wx.ID_ANY, "100", style=wx.TE_CENTRE ) self.button_engrave = wx.Button(self.panel_3, wx.ID_ANY, "Vector Engrave") self.text_engrave_speed = wx.TextCtrl( self.panel_3, wx.ID_ANY, "20", style=wx.TE_CENTRE ) self.button_cut = wx.Button(self.panel_3, wx.ID_ANY, "Vector Cut") self.text_cut_speed = wx.TextCtrl( self.panel_3, wx.ID_ANY, "10", style=wx.TE_CENTRE ) self.button_pause_stop = wx.Button(self.panel_3, wx.ID_ANY, "Pause/Stop") self.advanced_settings = wx.Panel(self, wx.ID_ANY) self.checkbox_halftone = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Halftone (Dither)" ) self.checkbox_invert = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Invert Raster Color" ) self.checkbox_mirror = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Mirror Design" ) self.checkbox_rotate = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Rotate Design" ) 
self.checkbox_csys = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Use Input CSYS" ) self.checkbox_cut_inner = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Cut Inside First" ) self.checkbox_rotary_enable = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Use Rotary Settings" ) self.checkbox_group_engrave = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Group Engrave Tasks" ) self.checkbox_group_vector = wx.CheckBox( self.advanced_settings, wx.ID_ANY, "Group Vector Tasks" ) self.text_raster_passes = wx.TextCtrl(self.advanced_settings, wx.ID_ANY, "1") self.text_engrave_passes = wx.TextCtrl(self.advanced_settings, wx.ID_ANY, "1") self.text_cut_passes = wx.TextCtrl(self.advanced_settings, wx.ID_ANY, "1") self.button_hide_advanced = wx.Button( self.advanced_settings, wx.ID_ANY, "Hide Advanced" ) self.scene = wx.Panel(self, style=wx.EXPAND | wx.WANTS_CHARS) self.scene.SetDoubleBuffered(True) self.__set_properties() self.__do_layout() self.renderer = LaserRender(self.context) self.scene.Bind(wx.EVT_PAINT, self.on_paint) self.scene.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase) self.scene.Bind(wx.EVT_MOTION, self.on_mouse_move) self.scene.Bind(wx.EVT_MOUSEWHEEL, self.on_mousewheel) self.scene.Bind(wx.EVT_MIDDLE_DOWN, self.on_mouse_middle_down) self.scene.Bind(wx.EVT_MIDDLE_UP, self.on_mouse_middle_up) self.scene.Bind(wx.EVT_LEFT_DCLICK, self.on_mouse_double_click) self.scene.Bind(wx.EVT_RIGHT_DOWN, self.on_right_mouse_down) self.scene.Bind(wx.EVT_RIGHT_UP, self.on_right_mouse_up) self.scene.Bind(wx.EVT_LEFT_DOWN, self.on_left_mouse_down) self.scene.Bind(wx.EVT_LEFT_UP, self.on_left_mouse_up) self.widget_scene.add_scenewidget(ReticleWidget(self.widget_scene)) self.widget_scene.add_scenewidget(VectorEngraveWidget(self.widget_scene, self.context.elements, self.renderer)) self.widget_scene.add_scenewidget(VectorCutWidget(self.widget_scene, self.context.elements, self.renderer)) self.widget_scene.add_scenewidget(GCodePathsWidget(self.widget_scene, self.context.elements, self.renderer)) self.widget_scene.add_scenewidget(RasterImageWidget(self.widget_scene, self.context.elements, self.renderer)) self.widget_scene.add_scenewidget(GridWidget(self.widget_scene)) self.widget_scene.add_interfacewidget(GuideWidget(self.widget_scene)) self.widget_scene.add_interfacewidget(TimeEstimateWidget(self.widget_scene, self.context.elements)) try: self.scene.Bind(wx.EVT_MAGNIFY, self.on_magnify_mouse) self.EnableTouchEvents(wx.TOUCH_ZOOM_GESTURE | wx.TOUCH_PAN_GESTURES) self.scene.Bind(wx.EVT_GESTURE_PAN, self.on_gesture) self.scene.Bind(wx.EVT_GESTURE_ZOOM, self.on_gesture) except AttributeError: # Not WX 4.1 pass self.scene.SetFocus() self.Bind(wx.EVT_BUTTON, self.on_button_initialize_laser, self.button_usb_init) self.Bind(wx.EVT_BUTTON, self.on_button_open_design, self.button_design_open) self.Bind( wx.EVT_BUTTON, self.on_button_reload_design, self.button_design_reload ) self.Bind(wx.EVT_BUTTON, self.on_button_home, self.button_home) self.Bind(wx.EVT_BUTTON, self.on_button_unlock_rail, self.button_unlock_rail) self.Bind(wx.EVT_TEXT, self.on_text_jog_step, self.text_jog_step) self.Bind(wx.EVT_BUTTON, self.on_button_align_top_left, self.button_align_TL) self.Bind(wx.EVT_BUTTON, self.on_button_jog_top, self.button_jog_T) self.Bind(wx.EVT_BUTTON, self.on_button_align_top_right, self.button_align_TR) self.Bind(wx.EVT_BUTTON, self.on_button_jog_left, self.button_jog_L) self.Bind(wx.EVT_BUTTON, self.on_button_align_center, self.button_align_C) self.Bind(wx.EVT_BUTTON, self.on_button_jog_right, self.button_jog_R) 
self.Bind(wx.EVT_BUTTON, self.on_button_align_bottom_left, self.button_align_BL) self.Bind(wx.EVT_BUTTON, self.on_button_jog_bottom, self.button_jog_B) self.Bind( wx.EVT_BUTTON, self.on_button_align_bottom_right, self.button_align_BR ) self.Bind(wx.EVT_BUTTON, self.on_button_move, self.button_move) self.Bind(wx.EVT_TEXT, self.on_text_move_x, self.text_move_x) self.Bind(wx.EVT_TEXT, self.on_text_move_y, self.text_move_y) self.Bind(wx.EVT_BUTTON, self.on_button_raster_engrave, self.button_raster) self.Bind(wx.EVT_TEXT, self.on_text_raster_speed, self.text_raster_speed) self.Bind(wx.EVT_BUTTON, self.on_button_vector_engrave, self.button_engrave) self.Bind(wx.EVT_TEXT, self.on_text_engrave_speed, self.text_engrave_speed) self.Bind(wx.EVT_BUTTON, self.on_button_vector_cut, self.button_cut) self.Bind(wx.EVT_TEXT, self.on_text_cut_speed, self.text_cut_speed) self.Bind(wx.EVT_BUTTON, self.on_button_pause_stop, self.button_pause_stop) self.Bind(wx.EVT_CHECKBOX, self.on_check_halftone, self.checkbox_halftone) self.Bind(wx.EVT_CHECKBOX, self.on_check_invert, self.checkbox_invert) self.Bind(wx.EVT_CHECKBOX, self.on_check_mirror, self.checkbox_mirror) self.Bind(wx.EVT_CHECKBOX, self.on_check_rotate, self.checkbox_rotate) self.Bind(wx.EVT_CHECKBOX, self.on_check_csys, self.checkbox_csys) self.Bind(wx.EVT_CHECKBOX, self.on_check_cut_inside, self.checkbox_cut_inner) self.Bind( wx.EVT_CHECKBOX, self.on_check_use_rotary, self.checkbox_rotary_enable ) self.Bind( wx.EVT_CHECKBOX, self.on_check_group_engrave, self.checkbox_group_engrave ) self.Bind( wx.EVT_CHECKBOX, self.on_check_group_vector, self.checkbox_group_vector ) self.Bind( wx.EVT_TEXT, self.on_text_raster_engrave_passes, self.text_raster_passes ) self.Bind( wx.EVT_TEXT, self.on_text_vector_engrave_passes, self.text_engrave_passes ) self.Bind(wx.EVT_TEXT, self.on_text_vector_cut_passes, self.text_cut_passes) self.Bind( wx.EVT_BUTTON, self.on_button_hide_advanced, self.button_hide_advanced ) # end wxGlade self.Bind(wx.EVT_SIZE, self.on_size) self.Bind(wx.EVT_DROP_FILES, self.on_drop_file) @self.context.console_command("refresh", help="Silence refresh") def refresh(command, channel, _, args=tuple(), **kwargs): self.Layout() self.Update() self.Refresh() channel(_("Refreshed.")) return @self.context.console_argument( "filename", type=str, help="filename of design to load", ) @self.context.console_command( "design_load", help="design_load <filename>", ) def plan(command, channel, _, filename=None, args=tuple(), **kwargs): self.tryopen(filename) self.context.setting(str, "working_file", None) self.context.setting(float, "jog_step", 10.0) self.context.setting(float, "move_x", 0.0) self.context.setting(float, "move_y", 0.0) self.context.setting(bool, "halftone", True) self.context.setting(bool, "invert", False) self.context.setting(bool, "mirror", False) self.context.setting(bool, "rotate", False) self.context.setting(bool, "csys", False) self.context.setting(bool, "cut_inner", True) self.context.setting(bool, "rotary_enable", False) self.context.setting(bool, "group_engrave", False) self.context.setting(bool, "group_vector", False) self.text_jog_step.SetValue(str(self.context.jog_step)) self.text_move_x.SetValue(str(self.context.move_x)) self.text_move_y.SetValue(str(self.context.move_y)) self.text_raster_speed.SetValue(str(self.context.raster_settings.speed)) self.text_engrave_speed.SetValue(str(self.context.engrave_settings.speed)) self.text_cut_speed.SetValue(str(self.context.cut_settings.speed)) self.checkbox_halftone.SetValue(self.context.halftone) 
self.checkbox_invert.SetValue(self.context.invert) self.checkbox_rotate.SetValue(self.context.rotate) self.checkbox_csys.SetValue(self.context.csys) self.checkbox_cut_inner.SetValue(self.context.cut_inner) self.checkbox_rotary_enable.SetValue(self.context.rotary_enable) self.checkbox_group_vector.SetValue(self.context.group_vector) self.checkbox_group_engrave.SetValue(self.context.group_engrave) self.text_raster_passes.SetValue(str(self.context.raster_settings.implicit_passes)) self.text_engrave_passes.SetValue(str(self.context.engrave_settings.implicit_passes)) self.text_cut_passes.SetValue(str(self.context.cut_settings.implicit_passes)) self.toggle_advance_settings() self.context.listen("rotary_enable", self.on_rotary_enable) self.context.listen("halftone", self.on_halftone) self.context.listen("op_setting_update", self.on_op_setting_update) self.context.listen("refresh_scene", self.on_refresh_scene) self.context.listen("bed_size", self.on_bed_changed) self.context.listen("units", self.on_space_changed) self.context.listen("draw_mode", self.on_draw_mode) self.context.listen("statusbar", self.on_statusbar) self.on_draw_mode(self.context.draw_mode) bed_dim = self.context.get_context("bed") bed_dim.setting(float, "bed_width", 325.0) bed_dim.setting(float, "bed_height", 220.0) bbox = (0, 0, bed_dim.bed_width * MILS_IN_MM, bed_dim.bed_height * MILS_IN_MM) self.widget_scene.widget_root.focus_viewport_scene( bbox, self.scene.ClientSize, 0.1 ) self.context.schedule(self) def load_or_open(self, filename): """ Loads recent file name given. If the filename cannot be opened attempts open dialog at last known location. """ if os.path.exists(filename): try: self.load(filename) except PermissionError: self.tryopen(filename) else: self.tryopen(filename) def tryopen(self, filename): """ Loads an open dialog at given filename to load data. """ files = self.context.load_types() if filename is not None: defaultFile = os.path.basename(filename) defaultDir = os.path.dirname(filename) else: defaultFile = "" defaultDir = "." with wx.FileDialog( self, _("Open"), defaultDir=defaultDir, defaultFile=defaultFile, wildcard=files, style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST ) as fileDialog: fileDialog.SetFilename(defaultFile) if fileDialog.ShowModal() == wx.ID_CANCEL: return # the user changed their mind pathname = fileDialog.GetPath() self.load(pathname) def load(self, pathname): self.context.setting(bool, "uniform_svg", False) self.context.setting(float, "svg_ppi", 96.0) self.context("raster clear\ncut clear\nengrave clear\n") self.context.working_file = pathname with wx.BusyInfo(_("Loading File...")): if pathname.endswith("svg"): self.context('inkscape locate input "%s" text2path load makepng load image wizard Gravy\n' % pathname) return True results = self.context.load( pathname, channel=self.context.channel("load"), svg_ppi=self.context.svg_ppi, ) if pathname.lower().endswith("png") or pathname.lower().endswith("jpg"): self.context("image wizard Gravy\n") self.request_refresh() return bool(results) def on_drop_file(self, event): """ Drop file handler Accepts multiple files drops. 
""" accepted = 0 rejected = 0 rejected_files = [] for pathname in event.GetFiles(): if self.load(pathname): accepted += 1 else: rejected += 1 rejected_files.append(pathname) if rejected != 0: reject = "\n".join(rejected_files) err_msg = _("Some files were unrecognized:\n%s") % reject dlg = wx.MessageDialog( None, err_msg, _("Error encountered"), wx.OK | wx.ICON_ERROR ) dlg.ShowModal() dlg.Destroy() def on_halftone(self, *args, **kwargs): self.checkbox_halftone.SetValue(self.context.halftone) def on_rotary_enable(self, *args, **kwargs): self.checkbox_rotary_enable.SetValue(self.context.rotary_enable) def on_op_setting_update(self, *args, **kwargs): self.text_raster_passes.SetValue(str(self.context.raster_settings.implicit_passes)) self.text_engrave_passes.SetValue(str(self.context.engrave_settings.implicit_passes)) self.text_cut_passes.SetValue(str(self.context.cut_settings.implicit_passes)) self.text_raster_speed.SetValue(str(self.context.raster_settings.speed)) self.text_engrave_speed.SetValue(str(self.context.engrave_settings.speed)) self.text_cut_speed.SetValue(str(self.context.cut_settings.speed)) def window_open(self): pass def window_close(self): self.context("quit\n") self.context.unlisten("statusbar", self.on_statusbar) self.context.unlisten("rotary_enable", self.on_rotary_enable) self.context.unlisten("halftone", self.on_halftone) self.context.unlisten("op_setting_update", self.on_op_setting_update) self.context.unlisten("bed_size", self.on_bed_changed) self.context.unlisten("units", self.on_space_changed) self.context.unlisten("draw_mode", self.on_draw_mode) def on_statusbar(self, message=None, color=None): if message is None: silence_statusbar_fields = [ "Current Position: X=%f Y=%f (W X H)=(%fmm X %fmm)" % (self.context.offset_x, self.context.offset_y, self.context.offset_width, self.context.offset_height), ] else: silence_statusbar_fields = [ message, ] if color is None: self.silence_statusbar.SetBackgroundColour(wx.WHITE) elif color == 1: self.silence_statusbar.SetBackgroundColour(wx.CYAN) elif color == 2: self.silence_statusbar.SetBackgroundColour(wx.YELLOW) elif color == 3: self.silence_statusbar.SetBackgroundColour(wx.RED) elif color == 4: self.silence_statusbar.SetBackgroundColour(wx.GREEN) elif color == 5: self.silence_statusbar.SetBackgroundColour(wx.BLUE) for i in range(len(silence_statusbar_fields)): self.silence_statusbar.SetStatusText(silence_statusbar_fields[i], i) def __set_properties(self): # begin wxGlade: Silence.__set_properties self.SetTitle("%s v%s" % (self.context._kernel.name, self.context._kernel.version)) self.silence_statusbar.SetStatusWidths([-1]) # statusbar fields self.on_statusbar(_("Welcome to Silence")) self.button_usb_init.SetToolTip( 'Establish connection with the laser controller board, and optionally Home the laser depending on the setting in the General settings window for "Home Upon initialize"' ) self.button_design_open.SetMinSize((100, 50)) self.button_design_open.SetToolTip( "Opens a dialog box to select a design file to open.\n\nOpen SVG, DXF and G-Code files using file open." ) self.button_design_reload.SetMinSize((100, 50)) self.button_design_reload.SetToolTip( "Reloads the current design. (Re-read the design file information from the storage location)" ) self.button_home.SetToolTip("Sends the laser to its Home position.") self.button_unlock_rail.SetToolTip( "Turns the stepper motors attached to the X and Y axes off. This allows the user to move the laser head manually." 
) self.text_jog_step.SetToolTip( "The distance the laser head will move when the laser head is moved (jogged) when the arrow buttons are pressed." ) self.button_align_TL.SetMinSize((50, 50)) self.button_align_TL.SetToolTip( "The four corner buttons and the center button are used to move the laser head to the corners of the loaded design. This is useful to verify that the design will fit in the space available on the material you are using." ) self.button_jog_T.SetMinSize((50, 50)) self.button_jog_T.SetToolTip( "The four arrow buttons move (jog) the laser head in the indicated direction the distance entered in the Jog Step entry field." ) self.button_align_TR.SetMinSize((50, 50)) self.button_align_TR.SetToolTip( "The four corner buttons and the center button are used to move the laser head to the corners of the loaded design. This is useful to verify that the design will fit in the space available on the material you are using." ) self.button_jog_L.SetMinSize((50, 50)) self.button_jog_L.SetToolTip( "The four arrow buttons move (jog) the laser head in the indicated direction the distance entered in the Jog Step entry field." ) self.button_align_C.SetMinSize((50, 50)) self.button_align_C.SetToolTip( "The four corner buttons and the center button are used to move the laser head to the corners of the loaded design. This is useful to verify that the design will fit in the space available on the material you are using." ) self.button_jog_R.SetMinSize((50, 50)) self.button_jog_R.SetToolTip( "The four arrow buttons move (jog) the laser head in the indicated direction the distance entered in the Jog Step entry field." ) self.button_align_BL.SetMinSize((50, 50)) self.button_align_BL.SetToolTip( "The four corner buttons and the center button are used to move the laser head to the corners of the loaded design. This is useful to verify that the design will fit in the space available on the material you are using." ) self.button_jog_B.SetMinSize((50, 50)) self.button_jog_B.SetToolTip( "The four arrow buttons move (jog) the laser head in the indicated direction the distance entered in the Jog Step entry field." ) self.button_align_BR.SetMinSize((50, 50)) self.button_align_BR.SetToolTip( "The four corner buttons and the center button are used to move the laser head to the corners of the loaded design. This is useful to verify that the design will fit in the space available on the material you are using." ) self.button_move.SetMinSize((75, 23)) self.button_move.SetToolTip( "The Move To button will home the laser then move the laser head to the X and Y position entered in the X and Y fields." ) self.text_move_x.SetMinSize((50, 20)) self.text_move_y.SetMinSize((50, 20)) self.button_raster.SetMinSize((100, 23)) self.button_raster.SetToolTip( "Start the raster engrave process. This will start the laser." ) self.text_raster_speed.SetMinSize((52, 23)) self.button_engrave.SetToolTip( "Start the vector engrave process. This will start the laser." ) self.text_engrave_speed.SetMinSize((52, 23)) self.text_engrave_speed.SetForegroundColour(wx.Colour(0, 0, 255)) self.button_cut.SetMinSize((100, 23)) self.button_cut.SetToolTip( "Start the vector cut process. This will start the laser." ) self.text_cut_speed.SetMinSize((52, 23)) self.text_cut_speed.SetForegroundColour(wx.Colour(255, 0, 0)) self.button_pause_stop.SetMinSize((0, 50)) self.button_pause_stop.SetBackgroundColour(wx.Colour(142, 35, 35)) self.button_pause_stop.SetToolTip( "Stop the running laser job. 
Pressing this button will pause sending the current job data to the laser and pop up a dialog box asking if you want to cancel the remainder of the job. The laser head may not stop instantly because there is some data that has already been sent to the laser controller board. The laser will finish running the data it has received before it stops." ) self.panel_6.SetMinSize((200, 0)) self.checkbox_halftone.SetToolTip( "Turn on or off halftone (dither). Turning this option on converts any input grayscale images into dots that are spaced to approximate a grayscale image when engraved. This is similar to the process used to produce images for newsprint. The stock controller board does not have the capability to control the laser power so dithering is the only way to get grayscale images using the stock controller. You can also dither the image before loading it." ) self.checkbox_halftone.SetValue(1) self.checkbox_invert.SetToolTip( "Inverts the colors in the raster image. This has the effect of turning the raster image into the negative of the original image so white is black and so on." ) self.checkbox_mirror.SetToolTip( "Mirrors the input design. This option is useful for making stamps or engraving on the back of mirrors." ) self.checkbox_rotate.SetToolTip("Simple rotate the input design by 90 degrees.") self.checkbox_csys.SetToolTip( "This option causes the reference point for a design to be the origin of the coordinate system of the input DXF file." ) self.checkbox_cut_inner.SetToolTip( "When this option is on the paths that are inside of other closed paths will be cut first. This is important sometimes because if the outside part is cut first it can fall out or shift on the cutting table resulting in a misaligned cut for the internal features if they are cut after the outside features." ) self.checkbox_cut_inner.SetValue(1) self.checkbox_rotary_enable.SetToolTip( "This option causes the program to use the settings in the rotary settings window intended to be used with a rotary devices attached to the y-axis connector on the controller board." ) self.checkbox_group_engrave.SetToolTip( 'This option combines the buttons for the raster and vector engraving tasks so both tasks are performed with one button click. If the "Group Vector Tasks" is also selected all three buttons are combined into one.' ) self.checkbox_group_vector.SetToolTip( 'This option combines the buttons for the vector engraving and cutting tasks so both tasks are performed with one button click. If the "Group Engrave Tasks" is also selected all three buttons are combined into one.' ) self.text_raster_passes.SetToolTip( "Set the number of time to repeat the raster engrave paths." ) self.text_engrave_passes.SetForegroundColour(wx.Colour(0, 0, 255)) self.text_engrave_passes.SetToolTip( "Set the number of time to repeat the vector engrave paths." ) self.text_cut_passes.SetForegroundColour(wx.Colour(255, 0, 0)) self.text_cut_passes.SetToolTip( "Set the number of time to repeat the vector cut paths." ) self.button_hide_advanced.SetMinSize((0, 50)) self.button_hide_advanced.SetToolTip( "Button to hide the advanced options pane." 
) self.scene.SetBackgroundColour(wx.Colour(192, 192, 192)) # end wxGlade def __do_layout(self): # begin wxGlade: Silence.__do_layout sizer_1 = wx.BoxSizer(wx.HORIZONTAL) sizer_20 = wx.BoxSizer(wx.HORIZONTAL) sizer_19 = wx.BoxSizer(wx.VERTICAL) sizer_23 = wx.BoxSizer(wx.HORIZONTAL) sizer_22 = wx.BoxSizer(wx.HORIZONTAL) sizer_21 = wx.BoxSizer(wx.HORIZONTAL) sizer_2 = wx.BoxSizer(wx.VERTICAL) sizer_14 = wx.BoxSizer(wx.VERTICAL) sizer_17 = wx.BoxSizer(wx.HORIZONTAL) sizer_16 = wx.BoxSizer(wx.HORIZONTAL) sizer_15 = wx.BoxSizer(wx.HORIZONTAL) sizer_7 = wx.BoxSizer(wx.VERTICAL) sizer_11 = wx.BoxSizer(wx.HORIZONTAL) sizer_13 = wx.BoxSizer(wx.VERTICAL) sizer_12 = wx.BoxSizer(wx.VERTICAL) sizer_18 = wx.BoxSizer(wx.VERTICAL) sizer_10 = wx.BoxSizer(wx.HORIZONTAL) sizer_9 = wx.BoxSizer(wx.HORIZONTAL) sizer_8 = wx.BoxSizer(wx.HORIZONTAL) sizer_4 = wx.BoxSizer(wx.VERTICAL) sizer_6 = wx.BoxSizer(wx.HORIZONTAL) sizer_5 = wx.BoxSizer(wx.HORIZONTAL) sizer_3 = wx.BoxSizer(wx.HORIZONTAL) sizer_2.Add(self.button_usb_init, 0, wx.EXPAND, 0) sizer_3.Add(self.button_design_open, 0, wx.EXPAND, 0) sizer_3.Add(self.button_design_reload, 0, wx.EXPAND, 0) sizer_2.Add(sizer_3, 0, wx.EXPAND, 0) static_line_1 = wx.StaticLine(self.panel_6, wx.ID_ANY) sizer_2.Add(static_line_1, 0, wx.EXPAND, 0) label_1 = wx.StaticText(self.panel_1, wx.ID_ANY, "Position Controls:") sizer_4.Add(label_1, 0, 0, 0) sizer_5.Add(self.button_home, 0, 0, 0) sizer_5.Add(self.button_unlock_rail, 0, 0, 0) sizer_4.Add(sizer_5, 0, wx.EXPAND, 0) label_2 = wx.StaticText(self.panel_1, wx.ID_ANY, "Jog Step") sizer_6.Add(label_2, 0, 0, 0) sizer_6.Add(self.text_jog_step, 0, wx.ALIGN_CENTER_VERTICAL, 0) label_3 = wx.StaticText(self.panel_1, wx.ID_ANY, "mm") sizer_6.Add(label_3, 0, 0, 0) sizer_4.Add(sizer_6, 0, wx.EXPAND, 0) self.panel_1.SetSizer(sizer_4) sizer_2.Add(self.panel_1, 0, wx.EXPAND, 0) sizer_8.Add(self.button_align_TL, 1, 0, 0) sizer_8.Add(self.button_jog_T, 1, 0, 0) sizer_8.Add(self.button_align_TR, 1, 0, 0) sizer_7.Add(sizer_8, 0, wx.ALIGN_CENTER_HORIZONTAL, 0) sizer_9.Add(self.button_jog_L, 1, 0, 0) sizer_9.Add(self.button_align_C, 1, 0, 0) sizer_9.Add(self.button_jog_R, 1, 0, 0) sizer_7.Add(sizer_9, 0, wx.ALIGN_CENTER_HORIZONTAL, 0) sizer_10.Add(self.button_align_BL, 1, 0, 0) sizer_10.Add(self.button_jog_B, 1, 0, 0) sizer_10.Add(self.button_align_BR, 1, 0, 0) sizer_7.Add(sizer_10, 0, wx.ALIGN_CENTER_HORIZONTAL, 0) label_10 = wx.StaticText(self.panel_4, wx.ID_ANY, "") sizer_18.Add(label_10, 0, 0, 0) sizer_18.Add(self.button_move, 0, 0, 0) sizer_11.Add(sizer_18, 2, wx.EXPAND, 0) label_4 = wx.StaticText(self.panel_4, wx.ID_ANY, " X") sizer_12.Add(label_4, 0, 0, 0) sizer_12.Add(self.text_move_x, 0, 0, 0) sizer_11.Add(sizer_12, 2, 0, 0) label_5 = wx.StaticText(self.panel_4, wx.ID_ANY, " Y") sizer_13.Add(label_5, 0, 0, 0) sizer_13.Add(self.text_move_y, 0, 0, 0) sizer_11.Add(sizer_13, 2, wx.EXPAND, 0) self.panel_4.SetSizer(sizer_11) sizer_7.Add(self.panel_4, 0, wx.EXPAND, 0) self.panel_2.SetSizer(sizer_7) sizer_2.Add(self.panel_2, 3, wx.EXPAND, 0) static_line_2 = wx.StaticLine(self.panel_6, wx.ID_ANY) sizer_2.Add(static_line_2, 0, wx.EXPAND, 0) sizer_15.Add(self.button_raster, 0, 0, 0) sizer_15.Add(self.text_raster_speed, 0, wx.EXPAND, 0) label_6 = wx.StaticText(self.panel_3, wx.ID_ANY, "mm/s") sizer_15.Add(label_6, 1, 0, 0) sizer_14.Add(sizer_15, 0, wx.EXPAND, 0) sizer_16.Add(self.button_engrave, 0, 0, 0) sizer_16.Add(self.text_engrave_speed, 0, wx.EXPAND, 0) label_7 = wx.StaticText(self.panel_3, wx.ID_ANY, "mm/s") sizer_16.Add(label_7, 1, 0, 0) 
sizer_14.Add(sizer_16, 0, wx.EXPAND, 0) sizer_17.Add(self.button_cut, 1, 0, 0) sizer_17.Add(self.text_cut_speed, 1, wx.EXPAND, 0) label_8 = wx.StaticText(self.panel_3, wx.ID_ANY, "mm/s") sizer_17.Add(label_8, 1, 0, 0) sizer_14.Add(sizer_17, 2, wx.EXPAND, 0) sizer_14.Add(self.button_pause_stop, 0, wx.EXPAND, 0) self.panel_3.SetSizer(sizer_14) sizer_2.Add(self.panel_3, 0, wx.EXPAND, 0) self.panel_6.SetSizer(sizer_2) sizer_1.Add(self.panel_6, 1, wx.EXPAND, 0) static_line_8 = wx.StaticLine( self.advanced_settings, wx.ID_ANY, style=wx.LI_VERTICAL ) sizer_20.Add(static_line_8, 0, wx.EXPAND, 0) label_9 = wx.StaticText(self.advanced_settings, wx.ID_ANY, "Advanced Settings") sizer_19.Add(label_9, 0, wx.ALIGN_CENTER_HORIZONTAL, 0) static_line_3 = wx.StaticLine(self.advanced_settings, wx.ID_ANY) sizer_19.Add(static_line_3, 0, wx.EXPAND, 0) sizer_19.Add(self.checkbox_halftone, 1, 0, 0) sizer_19.Add(self.checkbox_invert, 1, 0, 0) static_line_4 = wx.StaticLine(self.advanced_settings, wx.ID_ANY) sizer_19.Add(static_line_4, 0, wx.EXPAND, 0) sizer_19.Add(self.checkbox_mirror, 1, 0, 0) sizer_19.Add(self.checkbox_rotate, 1, 0, 0) sizer_19.Add(self.checkbox_csys, 1, 0, 0) static_line_5 = wx.StaticLine(self.advanced_settings, wx.ID_ANY) sizer_19.Add(static_line_5, 0, wx.EXPAND, 0) sizer_19.Add(self.checkbox_cut_inner, 1, 0, 0) sizer_19.Add(self.checkbox_rotary_enable, 1, 0, 0) sizer_19.Add((20, 20), 100, 0, 0) sizer_19.Add(self.checkbox_group_engrave, 1, 0, 0) sizer_19.Add(self.checkbox_group_vector, 1, 0, 0) static_line_7 = wx.StaticLine(self.advanced_settings, wx.ID_ANY) sizer_19.Add(static_line_7, 0, wx.EXPAND, 0) label_12 = wx.StaticText( self.advanced_settings, wx.ID_ANY, "Raster Eng. Passes" ) sizer_21.Add(label_12, 0, 0, 0) sizer_21.Add(self.text_raster_passes, 0, 0, 0) sizer_19.Add(sizer_21, 1, wx.EXPAND, 0) label_13 = wx.StaticText( self.advanced_settings, wx.ID_ANY, "Vector Eng. 
Passes" ) sizer_22.Add(label_13, 0, 0, 0) sizer_22.Add(self.text_engrave_passes, 0, 0, 0) sizer_19.Add(sizer_22, 1, wx.EXPAND, 0) label_14 = wx.StaticText(self.advanced_settings, wx.ID_ANY, "Vector Cut Passes") sizer_23.Add(label_14, 0, 0, 0) sizer_23.Add(self.text_cut_passes, 0, 0, 0) sizer_19.Add(sizer_23, 1, wx.EXPAND, 0) sizer_19.Add(self.button_hide_advanced, 1, wx.EXPAND, 0) sizer_20.Add(sizer_19, 0, 0, 0) self.advanced_settings.SetSizer(sizer_20) sizer_1.Add(self.advanced_settings, 1, wx.EXPAND, 0) sizer_1.Add(self.scene, 5, wx.EXPAND, 0) self.SetSizer(sizer_1) self.Layout() # end wxGlade def toggle_advance_settings(self): if self.advanced_settings.IsShown(): self.advanced_settings.Hide() else: self.advanced_settings.Show() self.context.console("refresh\n") def on_menu_save(self, event): # wxGlade: Silence.<event_handler> self.context.console("settings_save\n") def on_menu_load(self, event): # wxGlade: Silence.<event_handler> self.context.console("settings_load\n") def on_menu_open(self, event): # wxGlade: Silence.<event_handler> self.context.console("design_load\n") def on_menu_reload(self, event): # wxGlade: Silence.<event_handler> if self.context.working_file is not None: self.context.console("design_load %s\n" % self.context.working_file) def on_menu_egv(self, event): # wxGlade: Silence.<event_handler> self.context.console("egv_load\n") def on_menu_egv_save(self, event): # wxGlade: Silence.<event_handler> self.context.console("window open EgvSave\n") def on_menu_exit(self, event): # wxGlade: Silence.<event_handler> self.context.console("quit\n") def on_menu_refresh(self, event): # wxGlade: Silence.<event_handler> self.context.console("refresh\n") def on_menu_view_raster(self, event): # wxGlade: Silence.<event_handler> self.context.console("view_setting toggle raster\n") def on_menu_view_engrave(self, event): # wxGlade: Silence.<event_handler> self.context.console("view_setting toggle engrave\n") def on_menu_view_cut(self, event): # wxGlade: Silence.<event_handler> self.context.console("view_setting toggle cut\n") def on_menu_view_gcode(self, event): # wxGlade: Silence.<event_handler> self.context.console("view_setting toggle gcode\n") def on_menu_view_estimate(self, event): # wxGlade: Silence.<event_handler> self.context.console("view_setting toggle estimate\n") def on_menu_view_zoom(self, event): # wxGlade: Silence.<event_handler> self.context.console("view_setting toggle zoom\n") def on_menu_calc_raster(self, event): # wxGlade: Silence.<event_handler> self.context.console("calculate raster\n") def on_menu_convex_hull(self, event): # wxGlade: Silence.<event_handler> self.context.console("window open TraceBoundary\n") def on_menu_initialize(self, event): # wxGlade: Silence.<event_handler> self.context.console("device -p / init Lhystudios activate start\n") def on_menu_reset_usb(self, event): # wxGlade: Silence.<event_handler> self.context.console("device reset\n") def on_menu_release_usb(self, event): # wxGlade: Silence.<event_handler> self.context.console("device release\n") def on_menu_settings_general(self, event): # wxGlade: Silence.<event_handler> self.context.console("window open GeneralSettings\n") def on_menu_settings_raster(self, event): # wxGlade: Silence.<event_handler> self.context.console("window open RasterSettings\n") def on_menu_settings_rotary(self, event): # wxGlade: Silence.<event_handler> self.context.console("window open RotarySettings\n") def on_menu_settings_advanced(self, event): # wxGlade: Silence.<event_handler> self.toggle_advance_settings() def 
on_menu_help_about(self, event): # wxGlade: Silence.<event_handler> self.context.console("webhelp about\n") def on_menu_help_webpage(self, event): # wxGlade: Silence.<event_handler> self.context.console("webhelp webpage\n") def on_menu_help_manual(self, event): # wxGlade: Silence.<event_handler> self.context.console("webhelp manual\n") def on_button_initialize_laser(self, event): # wxGlade: Silence.<event_handler> if self.context.board == "Moshiboard": self.context.console("device -p / init Moshi activate start\n") else: self.context.console("device -p / init Lhystudios activate start\n") def on_button_open_design(self, event): # wxGlade: Silence.<event_handler> self.context.console("design_load\n") def on_button_reload_design(self, event): # wxGlade: Silence.<event_handler> if self.context.working_file is not None: self.context.console("design_load %s\n" % self.context.working_file) def on_button_home(self, event): # wxGlade: Silence.<event_handler> self.context.console("home\n") def on_button_unlock_rail(self, event): # wxGlade: Silence.<event_handler> self.context.console("unlock\n") def on_text_jog_step(self, event): # wxGlade: Silence.<event_handler> try: self.context.jog_step = float(self.text_jog_step.GetValue()) except ValueError: pass def on_button_align_top_left(self, event): # wxGlade: Silence.<event_handler> self.context.console("align top left\n") def on_button_jog_top(self, event): # wxGlade: Silence.<event_handler> self.context.console("top %fmm\n" % self.context.jog_step) def on_button_align_top_right(self, event): # wxGlade: Silence.<event_handler> self.context.console("align top right\n") def on_button_jog_left(self, event): # wxGlade: Silence.<event_handler> self.context.console("left %fmm\n" % self.context.jog_step) def on_button_align_center(self, event): # wxGlade: Silence.<event_handler> self.context.console("align center center\n") def on_button_jog_right(self, event): # wxGlade: Silence.<event_handler> self.context.console("right %fmm\n" % self.context.jog_step) def on_button_align_bottom_left(self, event): # wxGlade: Silence.<event_handler> self.context.console("align bottom left\n") def on_button_jog_bottom(self, event): # wxGlade: Silence.<event_handler> self.context.console("bottom %fmm\n" % self.context.jog_step) def on_button_align_bottom_right(self, event): # wxGlade: Silence.<event_handler> self.context.console("align bottom right\n") def on_button_move(self, event): # wxGlade: Silence.<event_handler> self.context.console( "move_absolute %fmm %fmm\n" % (self.context.move_x, self.context.move_y) ) def on_text_move_x(self, event): # wxGlade: Silence.<event_handler> try: self.context.move_x = float(self.text_move_x.GetValue()) except ValueError: pass def on_text_move_y(self, event): # wxGlade: Silence.<event_handler> try: self.context.move_y = float(self.text_move_y.GetValue()) except ValueError: pass def on_button_raster_engrave(self, event): # wxGlade: Silence.<event_handler> self.context.console("raster execute\n") def on_text_raster_speed(self, event): # wxGlade: Silence.<event_handler> try: self.context.raster_settings.speed = float(self.text_raster_speed.GetValue()) except ValueError: pass def on_button_vector_engrave(self, event): # wxGlade: Silence.<event_handler> self.context.console("engrave execute\n") def on_text_engrave_speed(self, event): # wxGlade: Silence.<event_handler> try: self.context.engrave_settings.speed = float(self.text_engrave_speed.GetValue()) except ValueError: pass def on_button_vector_cut(self, event): # wxGlade: 
Silence.<event_handler> self.context.console("cut execute\n") def on_text_cut_speed(self, event): # wxGlade: Silence.<event_handler> try: self.context.cut_settings.speed = float(self.text_cut_speed.GetValue()) except ValueError: pass def on_button_pause_stop(self, event): # wxGlade: Silence.<event_handler> self.context("pause\n") dlg = wx.MessageDialog( None, _("Press OK to abort.\n" "Cancel to resume the job."), _("Stop Laser Sending?"), wx.OK | wx.CANCEL | wx.ICON_WARNING, ) result = dlg.ShowModal() if result == wx.ID_OK: self.context("abort\n") else: self.context("pause\n") self.context("resume\n") dlg.Destroy() def on_check_halftone(self, event): # wxGlade: Silence.<event_handler> self.context.halftone = bool(self.checkbox_halftone.GetValue()) self.context.signal("halftone", self.context.halftone) def on_check_invert(self, event): # wxGlade: Silence.<event_handler> self.context.invert = bool(self.checkbox_invert.GetValue()) def on_check_mirror(self, event): # wxGlade: Silence.<event_handler> self.context.mirror = bool(self.checkbox_mirror.GetValue()) def on_check_rotate(self, event): # wxGlade: Silence.<event_handler> self.context.rotate = bool(self.checkbox_rotate.GetValue()) def on_check_csys(self, event): # wxGlade: Silence.<event_handler> self.context.csys = bool(self.checkbox_csys.GetValue()) def on_check_cut_inside(self, event): # wxGlade: Silence.<event_handler> self.context.cut_inner = bool(self.checkbox_cut_inner.GetValue()) def on_check_use_rotary(self, event): # wxGlade: Silence.<event_handler> self.context.rotary_enable = bool(self.checkbox_rotary_enable.GetValue()) self.context.signal("rotary_enable", self.context.rotary_enable) def on_check_group_engrave(self, event): # wxGlade: Silence.<event_handler> self.context.group_engrave = bool(self.checkbox_group_engrave.GetValue()) def on_check_group_vector(self, event): # wxGlade: Silence.<event_handler> self.context.group_vector = bool(self.checkbox_group_vector.GetValue()) def on_text_raster_engrave_passes(self, event): # wxGlade: Silence.<event_handler> try: self.context.raster_settings.passes = int(self.text_raster_passes.GetValue()) except ValueError: pass def on_text_vector_engrave_passes(self, event): # wxGlade: Silence.<event_handler> try: self.context.engrave_settings.passes = int(self.text_engrave_passes.GetValue()) except ValueError: pass def on_text_vector_cut_passes(self, event): # wxGlade: Silence.<event_handler> try: self.context.cut_settings.passes = int(self.text_cut_passes.GetValue()) except ValueError: pass def on_button_hide_advanced(self, event): # wxGlade: Silence.<event_handler> self.toggle_advance_settings() # Scene information def on_size(self, event): if self.context is None: return self.Layout() self.widget_scene.signal("guide") self.request_refresh() def set_buffer(self): width, height = self.scene.ClientSize if width <= 0: width = 1 if height <= 0: height = 1 self._Buffer = wx.Bitmap(width, height) def on_paint(self, event): try: if self._Buffer is None: self.update_buffer_ui_thread() wx.BufferedPaintDC(self.scene, self._Buffer) except RuntimeError: pass def on_space_changed(self, *args): self.widget_scene.signal("grid") self.widget_scene.signal("guide") self.request_refresh() def on_draw_mode(self, mode): self.silence_menubar.view_gcode.Check(bool(mode & DRAW_MODE_GCODE)) self.silence_menubar.view_cut.Check(bool(mode & DRAW_MODE_CUT)) self.silence_menubar.view_raster.Check(bool(mode & DRAW_MODE_RASTER)) self.silence_menubar.view_engrave.Check(bool(mode & DRAW_MODE_ENGRAVE)) 
self.silence_menubar.view_estimate.Check(bool(mode & DRAW_MODE_ESTIMATE)) self.silence_menubar.view_zoom.Check(bool(mode & DRAW_MODE_ZOOM)) def on_bed_changed(self, *args): self.widget_scene.signal("grid") self.request_refresh() def on_refresh_scene(self, *args): """ Called by 'refresh_scene' change. To refresh tree. :param args: :return: """ self.on_statusbar() self.request_refresh() def on_erase(self, event): pass def request_refresh_for_animation(self): """Called on the various signals trying to animate the screen.""" try: if self.context.draw_mode & DRAW_MODE_ANIMATE == 0: self.request_refresh() except AttributeError: pass def request_refresh(self): """Request an update to the scene.""" try: if self.context.draw_mode & DRAW_MODE_REFRESH == 0: self.screen_refresh_is_requested = True except AttributeError: pass def refresh_scene(self): """Called by the Scheduler at a given the specified framerate.""" if self.screen_refresh_is_requested and not self.screen_refresh_is_running: self.screen_refresh_is_running = True if self.screen_refresh_lock.acquire(timeout=1): if not wx.IsMainThread(): wx.CallAfter(self._refresh_in_ui) else: self._refresh_in_ui() else: self.screen_refresh_is_requested = False self.screen_refresh_is_running = False def _refresh_in_ui(self): """Called by refresh_scene() in the UI thread.""" if self.context is None: return self.update_buffer_ui_thread() self.scene.Refresh() self.scene.Update() self.screen_refresh_is_requested = False self.screen_refresh_is_running = False self.screen_refresh_lock.release() def update_buffer_ui_thread(self): """Performs the redraw of the data in the UI thread.""" dm = self.context.draw_mode if self._Buffer is None or self._Buffer.GetSize() != self.scene.ClientSize: self.set_buffer() dc = wx.MemoryDC() dc.SelectObject(self._Buffer) dc.SetBackground(self.background_brush) dc.Clear() w, h = dc.Size if dm & DRAW_MODE_FLIPXY != 0: dc.SetUserScale(-1, -1) dc.SetLogicalOrigin(w, h) gc = wx.GraphicsContext.Create(dc) gc.Size = dc.Size # gc.laserpath = self.laserpath font = wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD) gc.SetFont(font, wx.BLACK) if self.widget_scene is not None: self.widget_scene.draw(gc) if dm & DRAW_MODE_INVERT != 0: dc.Blit(0, 0, w, h, dc, 0, 0, wx.SRC_INVERT) gc.Destroy() del dc # Mouse Events. 
def on_mousewheel(self, event): if self.scene.HasCapture(): return rotation = event.GetWheelRotation() if event.GetWheelAxis() == wx.MOUSE_WHEEL_VERTICAL and not event.ShiftDown(): if event.HasAnyModifiers(): if rotation > 1: self.widget_scene.event(event.GetPosition(), "wheelup_ctrl") elif rotation < -1: self.widget_scene.event(event.GetPosition(), "wheeldown_ctrl") else: if rotation > 1: self.widget_scene.event(event.GetPosition(), "wheelup") elif rotation < -1: self.widget_scene.event(event.GetPosition(), "wheeldown") else: if rotation > 1: self.widget_scene.event(event.GetPosition(), "wheelleft") elif rotation < -1: self.widget_scene.event(event.GetPosition(), "wheelright") def on_mousewheel_zoom(self, event): if self.scene.HasCapture(): return rotation = event.GetWheelRotation() if self.context.mouse_zoom_invert: rotation = -rotation if rotation > 1: self.widget_scene.event(event.GetPosition(), "wheelup") elif rotation < -1: self.widget_scene.event(event.GetPosition(), "wheeldown") def on_mouse_middle_down(self, event): self.scene.SetFocus() if not self.scene.HasCapture(): self.scene.CaptureMouse() self.widget_scene.event(event.GetPosition(), "middledown") def on_mouse_middle_up(self, event): if self.scene.HasCapture(): self.scene.ReleaseMouse() self.widget_scene.event(event.GetPosition(), "middleup") def on_left_mouse_down(self, event): self.scene.SetFocus() if not self.scene.HasCapture(): self.scene.CaptureMouse() self.widget_scene.event(event.GetPosition(), "leftdown") def on_left_mouse_up(self, event): if self.scene.HasCapture(): self.scene.ReleaseMouse() self.widget_scene.event(event.GetPosition(), "leftup") def on_mouse_double_click(self, event): if self.scene.HasCapture(): return self.widget_scene.event(event.GetPosition(), "doubleclick") def on_mouse_move(self, event): if not event.Dragging(): self.widget_scene.event(event.GetPosition(), "hover") return self.widget_scene.event(event.GetPosition(), "move") def on_right_mouse_down(self, event): self.scene.SetFocus() if event.AltDown(): self.widget_scene.event(event.GetPosition(), "rightdown+alt") elif event.ControlDown(): self.widget_scene.event(event.GetPosition(), "rightdown+control") else: self.widget_scene.event(event.GetPosition(), "rightdown") def on_right_mouse_up(self, event): self.widget_scene.event(event.GetPosition(), "rightup") def on_magnify_mouse(self, event): magnify = event.GetMagnification() if magnify > 0: self.widget_scene.event(event.GetPosition(), "zoom-in") if magnify < 0: self.widget_scene.event(event.GetPosition(), "zoom-out") def on_gesture(self, event): """ This code requires WXPython 4.1 and the bind will fail otherwise. """ if event.IsGestureStart(): self.widget_scene.event(event.GetPosition(), "gesture-start") elif event.IsGestureEnd(): self.widget_scene.event(event.GetPosition(), "gesture-end") else: try: zoom = event.GetZoomFactor() except AttributeError: zoom = 1.0 self.widget_scene.event(event.GetPosition(), "zoom %f" % zoom) # end of class Silence
PypiClean
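The Silence panel above never drives the hardware directly from its wx event handlers: every button, menu item and checkbox is translated into a text command sent through self.context.console(...), and commands such as refresh and design_load are registered back onto the kernel with @self.context.console_command. The toy sketch below shows that dispatch pattern in isolation; ConsoleStub and everything in it is invented for illustration and is far simpler than the real kernel console (no channels, argument typing, or help output).

# Illustration only: a toy stand-in for the kernel console used above.
class ConsoleStub:
    def __init__(self):
        self._commands = {}

    def console_command(self, name, help=None):
        # Register a callable under a command name, decorator-style.
        def decorator(func):
            self._commands[name] = func
            return func
        return decorator

    def __call__(self, line):
        # Parse "name arg1 arg2\n" and dispatch to the registered callable.
        name, *args = line.strip().split()
        return self._commands[name](*args)


console = ConsoleStub()


@console.console_command("home", help="send the laser head to its home position")
def home():
    print("homing...")


# A wx handler like on_button_home then reduces to a one-liner:
console("home\n")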
/Acolyte-0.0.1.tar.gz/Acolyte-0.0.1/acolyte/core/job_service.py
from acolyte.util.validate import (
    check,
    StrField,
    IntField,
    BadReq,
)
from acolyte.core.service import (
    Result,
    AbstractService
)
from acolyte.core.storage.job_instance import JobInstanceDAO
from acolyte.core.storage.job_action_data import JobActionDataDAO
from acolyte.core.storage.user import UserDAO
from acolyte.core.view import (
    JobDetailsView,
    JobInstanceDetailsView,
    DecisionView,
)
from acolyte.exception import ObjectNotFoundException


class JobService(AbstractService):

    def __init__(self, service_container):
        super().__init__(service_container)
        self._job_mgr = self._("job_manager")
        self._db = self._("db")
        self._job_instance_dao = JobInstanceDAO(self._db)
        self._job_action_data_dao = JobActionDataDAO(self._db)
        self._user_dao = UserDAO(self._db)

    @check(
        StrField("job_name", required=True)
    )
    def get_job_details_by_name(self, job_name):
        """Get the details of a job definition by job name."""
        try:
            job_define = self._job_mgr.get(job_name)
        except ObjectNotFoundException:
            raise BadReq("job_not_found", job_name=job_name)
        return Result.ok(data=JobDetailsView.from_job(job_define))

    def get_all_job_definations(self):
        """Get all job definitions."""
        ...

    def get_job_instance_list_by_flow_instance(self, flow_instance_id):
        """Get the list of job instances for the given flow_instance_id."""
        ...

    @check(
        IntField("job_instance_id", required=True),
    )
    def get_job_instance_details(self, job_instance_id):
        """Get the details of a job instance, including the data of each of its events."""
        job_instance = self._job_instance_dao.query_by_id(job_instance_id)
        if job_instance is None:
            # job_instance is None here, so report the id that was requested.
            raise BadReq("instance_not_found", job_instance_id=job_instance_id)
        action_data_list = self._job_action_data_dao\
            .query_by_instance_id(job_instance.id)
        actor_id_list = [action_data.actor for action_data in action_data_list]
        actors = self._user_dao.query_users_by_id_list(actor_id_list, True)
        return Result.ok(data=JobInstanceDetailsView.from_job_instance(
            job_instance, action_data_list, actors))

    @check(
        IntField("job_instance_id", required=True),
        StrField("decision_name", required=True)
    )
    def get_decision_info(self, job_instance_id, decision_name):
        """Get a summary of one of the job's decisions."""
        job_instance = self._job_instance_dao.query_by_id(job_instance_id)
        if job_instance is None:
            raise BadReq("job_instance_not_found",
                         job_instance_id=job_instance_id)
        job_define = self._job_mgr.get(job_instance.job_name)
        decision_define = job_define.get_decision(decision_name)
        if decision_define is None:
            raise BadReq("decision_not_found", decision_name=decision_name)
        return Result.ok(DecisionView.from_decision_define(
            decision_define, job_define))
PypiClean
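JobService above validates its arguments with declarative @check(StrField(...), IntField(...)) rules, raises BadReq for user-level errors, and wraps successful payloads in Result.ok(data=...). A hypothetical caller could look like the sketch below; the service_container wiring is assumed to be provided by the host Acolyte application, "nightly_build" is a made-up job name, and whether BadReq propagates to the caller or is converted by the @check decorator is not shown in this file.

# Hypothetical caller -- `service_container` is assumed to be supplied by the
# host Acolyte application; "nightly_build" is not a real job name.
service = JobService(service_container)

try:
    result = service.get_job_details_by_name("nightly_build")
    # Result.ok(data=...) is assumed to expose the wrapped view as `result.data`.
    details_view = result.data
except BadReq:
    # Raised as BadReq("job_not_found", job_name="nightly_build") when the job
    # manager has no definition under that name.
    details_view = None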
/MGP_SDK-1.1.1.tar.gz/MGP_SDK-1.1.1/src/MGP_SDK/account_service/addresses.py
import json import requests import MGP_SDK.process as process class Address: def __init__(self, auth): self.base_url = auth.api_base_url self.response = None self.version = auth.version self.auth = auth def get_address(self, address_id): """ Function lists an address' details Args: address_id (int) = ID of the address Returns: Dictionary of the desired address' details """ authorization = process.authorization(self.auth) url = self.base_url + '/account-service/api/v1/addresses/{}'.format(address_id) response = requests.request("GET", url, headers=authorization, verify=self.auth.SSL) process._response_handler(response) return response.json() def add_address(self, city, country, postal_code, state, street1, **kwargs): """ Function creates a new address Args: city (string) = Desired city for the new address. Must have correct capitalization country (string) = Desired country for the new address. Must have correct capitalization postal_code (string) = Desired postal/zip code for the new address. Must be in 5 digit format state (string) = Desired state for the new address in abbreviated form. Must be capitalized street1 (string) = Desired street address for the new address Kwargs: phone (string) = Desired phone number for the new address. Must be in XXX-XXX-XXXX form streetAddress2 (string) = Desired secondary street address for the new address Returns: Dictionary of the new address' details """ authorization = process.authorization(self.auth) url = self.base_url + '/account-service/api/v1/addresses' payload = { "city": city.title(), "country": country.title(), "postalCode": postal_code, "state": state.upper(), "streetAddress1": street1 } for item in kwargs.keys(): if item == "phone" and len(kwargs[item]) != 12: raise Exception("{} is not a valid phone number. " "Phone number must be 10 digits long and separated by two hyphens".format(kwargs[item])) payload.update({item: kwargs[item]}) payload = json.dumps(payload) response = requests.request("POST", url, headers=authorization, data=payload, verify=self.auth.SSL) process._response_handler(response) return response.json() def update_address(self, address_id, **kwargs): """ Function updates an existing address' details Args: address_id (int) = ID of the address Kwargs: city (string) = Desired city for the new address. Must have correct capitalization country (string) = Desired country for the new address. Must have correct capitalization postalCode (string) = Desired postal/zip code for the new address. Must be in 5 digit format state (string) = Desired state for the new address in abbreviated form. Must be capitalized phone (string) = Desired phone number for the new address. Must be in XXX-XXX-XXXX form streetAddress1 (string) = Desired street address for the new address streetAddress2 (string) = Desired secondary street address for the new address Returns: Dictionary of the updated address' details """ authorization = process.authorization(self.auth) url = self.base_url + '/account-service/api/v1/addresses/' address_info = self.get_address(address_id) payload = { "id": address_info["id"], "streetAddress1": address_info["streetAddress1"], "streetAddress2": address_info["streetAddress2"], "city": address_info["city"], "country": address_info["country"], "state": address_info["state"], "postalCode": address_info["postalCode"], "phone": address_info["phone"] } for item in kwargs.keys(): if item == "id": raise Exception("Address ID cannot be updated") if item == "phone" and len(kwargs[item]) != 12: raise Exception("{} is not a valid phone number. 
" "Phone number must be 10 digits long and separated by two hyphens".format(kwargs[item])) payload.update({item: kwargs[item]}) payload = json.dumps(payload) response = requests.request("PUT", url, headers=authorization, data=payload, verify=self.auth.SSL) process._response_handler(response) return response.json() def delete_address(self, address_id): """ Function deletes an address Args: address_id (int) = ID of the address Returns: Message of successful deletion """ authorization = process.authorization(self.auth) url = self.base_url + '/account-service/api/v1/addresses?id={}'.format(address_id) response = requests.request("DELETE", url, headers=authorization, verify=self.auth.SSL) process._response_handler(response) return "Address {} successfully deleted".format(address_id)
PypiClean
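The docstrings above pin down the expected formats: title-cased city and country, an upper-cased state abbreviation, a five-digit postal code, and a 12-character XXX-XXX-XXXX phone number. A usage sketch under stated assumptions: auth is an already-authenticated MGP_SDK auth object, the create response is assumed to echo the new record's "id" the way get_address does, and all field values are invented.

# Sketch only: `auth` is assumed to be an authenticated MGP_SDK auth object.
address_api = Address(auth)

created = address_api.add_address(
    city="Denver",
    country="United States",
    postal_code="80202",
    state="CO",
    street1="1234 Example St",    # invented street address
    phone="303-555-0100",         # must be XXX-XXX-XXXX (12 characters)
)

# Correct just the phone number later; unspecified fields keep their stored values.
address_api.update_address(created["id"], phone="303-555-0199")

address_api.delete_address(created["id"])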
/Flask-AppBuilder-jack-3.3.4.tar.gz/Flask-AppBuilder-jack-3.3.4/flask_appbuilder/filemanager.py
import logging import os import os.path as op import re import uuid from flask.globals import _request_ctx_stack from werkzeug.datastructures import FileStorage from werkzeug.utils import secure_filename from wtforms import ValidationError try: from flask import _app_ctx_stack except ImportError: _app_ctx_stack = None app_stack = _app_ctx_stack or _request_ctx_stack log = logging.getLogger(__name__) try: from PIL import Image, ImageOps except ImportError: Image = None ImageOps = None class FileManager(object): def __init__( self, base_path=None, relative_path="", namegen=None, allowed_extensions=None, permission=0o755, **kwargs ): ctx = app_stack.top if "UPLOAD_FOLDER" in ctx.app.config and not base_path: base_path = ctx.app.config["UPLOAD_FOLDER"] if not base_path: raise Exception("Config key UPLOAD_FOLDER is mandatory") self.base_path = base_path self.relative_path = relative_path self.namegen = namegen or uuid_namegen if not allowed_extensions and "FILE_ALLOWED_EXTENSIONS" in ctx.app.config: self.allowed_extensions = ctx.app.config["FILE_ALLOWED_EXTENSIONS"] else: self.allowed_extensions = allowed_extensions self.permission = permission self._should_delete = False def is_file_allowed(self, filename): if not self.allowed_extensions: return True return ( "." in filename and filename.rsplit(".", 1)[1].lower() in self.allowed_extensions ) def generate_name(self, obj, file_data): return self.namegen(file_data) def get_path(self, filename): if not self.base_path: raise ValueError("FileUploadField field requires base_path to be set.") return op.join(self.base_path, filename) def delete_file(self, filename): path = self.get_path(filename) if op.exists(path): os.remove(path) def save_file(self, data, filename): filename_ = secure_filename(filename) path = self.get_path(filename_) if not op.exists(op.dirname(path)): os.makedirs(os.path.dirname(path), self.permission) data.save(path) return filename_ class ImageManager(FileManager): """ Image Manager will manage your image files referenced on SQLAlchemy Model will save files on IMG_UPLOAD_FOLDER as <uuid>_sep_<filename> """ keep_image_formats = ("PNG",) def __init__( self, base_path=None, relative_path=None, max_size=None, namegen=None, allowed_extensions=None, thumbgen=None, thumbnail_size=None, permission=0o755, **kwargs ): # Check if PIL is installed if Image is None: raise Exception("PIL library was not found") ctx = app_stack.top if "IMG_SIZE" in ctx.app.config and not max_size: self.max_size = ctx.app.config["IMG_SIZE"] if "IMG_UPLOAD_URL" in ctx.app.config and not relative_path: relative_path = ctx.app.config["IMG_UPLOAD_URL"] if not relative_path: raise Exception("Config key IMG_UPLOAD_URL is mandatory") if "IMG_UPLOAD_FOLDER" in ctx.app.config and not base_path: base_path = ctx.app.config["IMG_UPLOAD_FOLDER"] if not base_path: raise Exception("Config key IMG_UPLOAD_FOLDER is mandatory") self.thumbnail_fn = thumbgen or thumbgen_filename self.thumbnail_size = thumbnail_size self.image = None if not allowed_extensions: allowed_extensions = ("gif", "jpg", "jpeg", "png", "tiff") super(ImageManager, self).__init__( base_path=base_path, relative_path=relative_path, namegen=namegen, allowed_extensions=allowed_extensions, permission=permission, **kwargs ) def get_url(self, filename): if isinstance(filename, FileStorage): return filename.filename return self.relative_path + filename def get_url_thumbnail(self, filename): if isinstance(filename, FileStorage): return filename.filename return self.relative_path + thumbgen_filename(filename) # Deletion 
def delete_file(self, filename): super(ImageManager, self).delete_file(filename) self.delete_thumbnail(filename) def delete_thumbnail(self, filename): path = self.get_path(self.thumbnail_fn(filename)) if op.exists(path): os.remove(path) # Saving def save_file(self, data, filename, size=None, thumbnail_size=None): """ Saves an image File :param data: FileStorage from Flask form upload field :param filename: Filename with full path """ max_size = size or self.max_size thumbnail_size = thumbnail_size or self.thumbnail_size if data and isinstance(data, FileStorage): try: self.image = Image.open(data) except Exception as e: raise ValidationError("Invalid image: %s" % e) path = self.get_path(filename) # If Path does not exist, create it if not op.exists(op.dirname(path)): os.makedirs(os.path.dirname(path), self.permission) # Figure out format filename, format = self.get_save_format(filename, self.image) if self.image and (self.image.format != format or max_size): if max_size: image = self.resize(self.image, max_size) else: image = self.image self.save_image(image, self.get_path(filename), format) else: data.seek(0) data.save(path) self.save_thumbnail(data, filename, format, thumbnail_size) return filename def save_thumbnail(self, data, filename, format, thumbnail_size=None): thumbnail_size = thumbnail_size or self.thumbnail_size if self.image and thumbnail_size: path = self.get_path(self.thumbnail_fn(filename)) self.save_image(self.resize(self.image, thumbnail_size), path, format) def resize(self, image, size): """ Resizes the image :param image: The image object :param size: size is PIL tuple (width, heigth, force) ex: (200,100,True) """ (width, height, force) = size if image.size[0] > width or image.size[1] > height: if force: return ImageOps.fit(self.image, (width, height), Image.ANTIALIAS) else: thumb = self.image.copy() thumb.thumbnail((width, height), Image.ANTIALIAS) return thumb return image def save_image(self, image, path, format="JPEG"): if image.mode not in ("RGB", "RGBA"): image = image.convert("RGBA") with open(path, "wb") as fp: image.save(fp, format) def get_save_format(self, filename, image): if image.format not in self.keep_image_formats: name, ext = op.splitext(filename) filename = "%s.jpg" % name return filename, "JPEG" return filename, image.format def uuid_namegen(file_data): return str(uuid.uuid1()) + "_sep_" + file_data.filename def get_file_original_name(name): """ Use this function to get the user's original filename. Filename is concatenated with <UUID>_sep_<FILE NAME>, to avoid collisions. Use this function on your models on an aditional function :: class ProjectFiles(Base): id = Column(Integer, primary_key=True) file = Column(FileColumn, nullable=False) def file_name(self): return get_file_original_name(str(self.file)) :param name: The file name from model :return: Returns the user's original filename removes <UUID>_sep_ """ re_match = re.findall(".*_sep_(.*)", name) if re_match: return re_match[0] else: return "Not valid" def uuid_originalname(uuid_filename): return uuid_filename.split("_sep_")[1] def thumbgen_filename(filename): name, ext = op.splitext(filename) return "%s_thumb%s" % (name, ext)
PypiClean
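The uuid_namegen / get_file_original_name pair above fixes the storage naming contract used throughout this module: uploads are stored as <uuid>_sep_<original name>, and the original name is recovered by splitting on _sep_. A small round-trip check, assuming this fork installs under the usual flask_appbuilder package name:

from io import BytesIO

from werkzeug.datastructures import FileStorage

from flask_appbuilder.filemanager import get_file_original_name, uuid_namegen

upload = FileStorage(stream=BytesIO(b"example bytes"), filename="report.pdf")

stored_name = uuid_namegen(upload)          # e.g. "a1b2c3..._sep_report.pdf"
assert stored_name.endswith("_sep_report.pdf")
assert get_file_original_name(stored_name) == "report.pdf"
assert get_file_original_name("no-marker.pdf") == "Not valid"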
/LiBai-0.1.1.tar.gz/LiBai-0.1.1/libai/data/data_utils/reindexed_dataset.py
import logging import os import time import numpy as np import oneflow as flow from libai.utils import distributed as dist logger = logging.getLogger(__name__) def get_samples_mapping(data_prefix, indexed_dataset, max_seq_length, short_seq_prob, binary_head): """Get a list that maps a sample index to a starting sentence index, end sentence index, and length""" # Filename of the index mapping indexmap_filename = data_prefix indexmap_filename += "_{}msl".format(max_seq_length) indexmap_filename += "_{}ssp".format(short_seq_prob) indexmap_filename += "_sample_mapping.npy" documents = indexed_dataset.doc_idx sizes = indexed_dataset.sizes # Build the indexed mapping if not exist. if flow.env.get_rank() == 0 and not os.path.isfile(indexmap_filename): logger.info( "WARNING: could not find index map file {}, building " "the indices on rank 0 ...".format(indexmap_filename) ) # Build samples mapping verbose = flow.env.get_rank() == 0 start_time = time.time() logger.info("building samples index mapping for {} ...".format(data_prefix)) from libai.data.data_utils import helpers samples_mapping = helpers.build_mapping( documents, sizes, max_seq_length, short_seq_prob, verbose, 2 if binary_head else 1, ) logger.info("done building samples index maping") np.save(indexmap_filename, samples_mapping, allow_pickle=True) logger.info("saved the index mapping in {}".format(indexmap_filename)) # Make sure all the ranks have built the mapping logger.info( "elapsed time to build and save samples mapping " "(seconds): {:4f}".format(time.time() - start_time) ) dist.synchronize() # Load indexed dataset. logger.info("loading indexed mapping from {}".format(indexmap_filename)) start_time = time.time() samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode="r") logger.info("loaded indexed file in {:3.3f} seconds".format(time.time() - start_time)) logger.info("total number of samples: {}".format(samples_mapping.shape[0])) return samples_mapping class SentenceIndexedDataset(flow.utils.data.Dataset): """This class is propused for building sample mapping index from `indexed_dataset` to actural dataset. It will combine as many consecutive sentences as possible in the same document without exceeding `max_seq_length`. When it does not reach maximum length, the pad will be filled later. All the sentences in it are complete. `binary_head` controls whether to return one or two sentences, which will be used in Bert. """ def __init__( self, data_prefix, indexed_dataset, max_seq_length=512, short_seq_prob=0.0, binary_head=False, ): self.max_seq_length = max_seq_length self.short_seq_prob = short_seq_prob self.binary_head = binary_head self.indexed_dataset = indexed_dataset self.samples_mapping = get_samples_mapping( data_prefix, self.indexed_dataset, self.max_seq_length, self.short_seq_prob, self.binary_head, ) def __len__(self): return self.samples_mapping.shape[0] def __getitem__(self, idx): start_idx, end_idx, seq_length = self.samples_mapping[idx] sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)] assert seq_length <= self.max_seq_length return sample @property def supports_prefetch(self): return self.indexed_dataset.supports_prefetch def prefetch(self, indices): new_indices = [] for idx in indices: start_idx, end_idx, _ = self.samples_mapping[idx] new_indices.extend([i for i in range(start_idx, end_idx)]) self.indexed_dataset.prefetch(new_indices) def build_index_mappings(data_prefix, indexed_dataset, max_seq_length): """Build sample-idx. 
sample-idx: is the start document index and document offset for each training sample. """ # Filename of the index mappings. indexmap_filename = data_prefix indexmap_filename += "_{}msl".format(max_seq_length) indexmap_filename += "_sample_idx.npy" documents = indexed_dataset.doc_idx.astype(np.int64) sizes = indexed_dataset.sizes.astype(np.int64) num_tokens = np.sum(sizes[documents[:-1]]) # Build the indexed mapping if not exist. if flow.env.get_rank() == 0 and not os.path.isfile(indexmap_filename): logger.info("could not find index map files, building the indices on rank 0 ...") # sample-idx. start_time = time.time() from libai.data.data_utils import helpers sample_idx = helpers.build_sample_idx(documents, sizes, max_seq_length, num_tokens) np.save(indexmap_filename, sample_idx, allow_pickle=True) logger.info( "elasped time to build and save sample-idx mapping " "(seconds): {:4f}".format(time.time() - start_time) ) dist.synchronize() # Load mappings. start_time = time.time() logger.info(" > loading sample-idx mapping from {}".format(indexmap_filename)) sample_idx = np.load(indexmap_filename, allow_pickle=True, mmap_mode="r") logger.info("loaded indexed file in {:3.3f} seconds".format(time.time() - start_time)) logger.info("total number of samples: {}".format(sample_idx.shape[0])) return sample_idx class BlockIndexedDataset(flow.utils.data.Dataset): """This class is propused for building sample mapping index from `indexed_dataset` to actural dataset. It will extract the sentence with the length of `max_seq_length` from the document. If it is less than the maximum length, it will be intercepted from the next document. Therefore, it always returns sentences with `max_seq_length`, but it may contain incomplete sentences. This is used for GPT training, and it can reduce padding and improve training efficiency. """ def __init__(self, data_prefix, indexed_dataset, max_seq_length=512): self.max_seq_length = max_seq_length self.indexed_dataset = indexed_dataset self.doc_idx = indexed_dataset.doc_idx self.sample_idx = build_index_mappings( data_prefix, self.indexed_dataset, self.max_seq_length ) def __len__(self): return self.sample_idx.shape[0] - 1 def __getitem__(self, idx): doc_index_f = self.sample_idx[idx][0] doc_index_l = self.sample_idx[idx + 1][0] offset_f = self.sample_idx[idx][1] offset_l = self.sample_idx[idx + 1][1] if doc_index_f == doc_index_l: sample = self.indexed_dataset.get( self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1 ) else: # Otherwise, get the rest of the initial document. sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)] # Loop over all in between documents and add the entire document. for i in range(doc_index_f + 1, doc_index_l): sample_list.append(self.indexed_dataset.get(self.doc_idx[i])) # And finally add the relevant portion of last document. sample_list.append( self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1) ) sample = np.concatenate(sample_list) return sample @property def supports_prefetch(self): # this dataset must be `cached`, and IndexedCachedDataset are not support prefetch. return False
PypiClean
/Adytum-PyMonitor-1.0.5.tar.bz2/Adytum-PyMonitor-1.0.5/lib/datetime/timetime.py
from datetime import datetime
from datetime import time

import math

import pytz

DAYS_IN_YEAR = 365.25
X1 = 100
X2 = 30.6001
GREGORIAN_YEAR = 1582
GREGORIAN_MONTH = 10
GREGORIAN_DAY = 15
JD_NUM1_OFFSET = 1720994.5
JULIAN_EPOCH = 2451545.0
CENTURY_DAYS = 36525.0
COEF1 = 6.697374558
COEF2 = 2400.051336
COEF3 = 0.000025862
GMST_A = 280.46061837
GMST_B = 360.98564736629
GMST_C = 0.000387933
GMST_D = 38710000
SOLAR_SIDEREAL_RATIO = 1.002737909350795

GMT = pytz.timezone('GMT')

def cos(deg):
    return math.cos(math.radians(deg))

def sin(deg):
    return math.sin(math.radians(deg))

def todecimalhours(time_obj):
    seconds = time_obj.second/(60.**2)
    minutes = time_obj.minute/60.
    return time_obj.hour + minutes + seconds

def fromdecimalhours(float_num):
    pass

def hoursdayfraction(time_obj):
    return todecimalhours(time_obj)/24.

def check_tz(datetime_obj):
    if not datetime_obj.tzinfo:
        raise ValueError("You must pass a datetime object with a timezone (see pytz.timezone())")

def ut(datetime_obj):
    check_tz(datetime_obj)
    return datetime_obj.astimezone(GMT)

def degreesToTime(degrees):
    d = degrees % 360
    hour, minutes = divmod((d/360) * 24, 1)
    minute, seconds = divmod(minutes*60, 1)
    second, micro = divmod(seconds*60, 1)
    return time(int(hour), int(minute), int(second))

def isLeapYear(datetime_obj):
    '''
    >>> from datetime import datetime
    >>> dt = datetime(1972,8,17); isLeapYear(dt)
    True
    >>> dt = datetime(2000,8,17); isLeapYear(dt)
    True
    >>> dt = datetime(2004,8,17); isLeapYear(dt)
    True
    >>> dt = datetime(2005,8,17); isLeapYear(dt)
    False
    '''
    y = datetime_obj.year
    if (y % 400 == 0) or (y % 4 == 0 and y % 100 != 0):
        return True
    else:
        return False

def daysInMonth(datetime_obj):
    '''
    >>> from datetime import datetime
    >>> dt = datetime(2000,1,1); daysInMonth(dt)
    31
    >>> dt = datetime(2000,2,1); daysInMonth(dt)
    29
    >>> dt = datetime(2001,2,1); daysInMonth(dt)
    28
    >>> dt = datetime(2005,3,1); daysInMonth(dt)
    31
    >>> dt = datetime(2005,4,1); daysInMonth(dt)
    30
    >>> dt = datetime(2005,5,1); daysInMonth(dt)
    31
    >>> dt = datetime(2005,6,1); daysInMonth(dt)
    30
    >>> dt = datetime(2005,7,1); daysInMonth(dt)
    31
    >>> dt = datetime(2005,8,1); daysInMonth(dt)
    31
    >>> dt = datetime(2005,9,1); daysInMonth(dt)
    30
    >>> dt = datetime(2005,10,1); daysInMonth(dt)
    31
    >>> dt = datetime(2005,11,1); daysInMonth(dt)
    30
    >>> dt = datetime(2005,12,1); daysInMonth(dt)
    31
    '''
    m = datetime_obj.month
    y = datetime_obj.year
    if m == 2:
        if isLeapYear(datetime_obj):
            return 29
        else:
            return 28
    elif m in [9,4,6,11]:
        return 30
    elif m in [1,3,5,7,8,10,12]:
        return 31

def dayOfYear(datetime_obj):
    '''
    >>> from datetime import datetime
    >>> dt = datetime(2000,1,1); dayOfYear(dt)
    1
    >>> dt = datetime(2000,12,31); dayOfYear(dt)
    366
    >>> dt = datetime(2005,12,31); dayOfYear(dt)
    365
    >>> dt = datetime(1972,8,17); dayOfYear(dt)
    230
    >>> dt = datetime(2005,8,17); dayOfYear(dt)
    229
    '''
    y, m, d = datetime_obj.year, datetime_obj.month, datetime_obj.day
    # integer (floor) division is required here, matching the doctest values
    if isLeapYear(datetime_obj):
        n = int((275*m)//9 - ((m + 9)//12) + int(d) - 30)
    else:
        n = int((275*m)//9 - 2*((m + 9)//12) + int(d) - 30)
    return n

def julianToDateTime(jd):
    '''
    >>> julianToDateTime(2451544.49999).timetuple()
    (1999, 12, 31, 23, 59, 59, 4, 365, 0)
    >>> julianToDateTime(2451544.5).timetuple()
    (2000, 1, 1, 0, 0, 0, 5, 1, 0)
    >>> julianToDateTime(2453682.54411).timetuple()
    (2005, 11, 8, 1, 3, 31, 1, 312, 0)
    >>> julianToDateTime(2453736.49999).timetuple()
    (2005, 12, 31, 23, 59, 59, 5, 365, 0)
    >>> julianToDateTime(2453736.5).timetuple()
    (2006, 1, 1, 0, 0, 0, 6, 1, 0)
    '''
    if jd < 0:
        raise ValueError("Can't handle negative days.")
    jd += 0.5
    z = int(jd)
    f = jd - z
    a = z
    if z >= 2299161:
        alpha = int((z - 1867216.26)/36254.25)
        a = z + 1 + alpha - int(alpha/4)
    b = a + 1524
    c = int((b - 122.1)/365.25)
    d = int(365.25 * c)
    e = int((b - d)/30.6001)
    day = b - d - int(30.6001 * e) + f
    if e < 13.5:
        month = int(e - 1)
    else:
        month = int(e - 13)
    if month > 2.5:
        year = int(c - 4716)
    else:
        year = int(c - 4715)
    day, hours = divmod(day, 1)
    hour, minutes = divmod(hours * 24, 1)
    minute, seconds = divmod(minutes * 60, 1)
    second, micros = divmod(seconds * 60, 1)
    micro = round(micros * 1000)
    return datetime(int(year), int(month), int(day), int(hour), int(minute), int(second), int(micro), GMT)

def dayOfWeek(datetime_obj):
    '''
    >>> from datetime import datetime
    >>> dt = datetime(2005,11,6); dayOfWeek(dt)
    0
    >>> dt = datetime(2005,11,7); dayOfWeek(dt)
    1
    >>> dt = datetime(2005,11,11); dayOfWeek(dt)
    5
    >>> dt = datetime(2005,11,12); dayOfWeek(dt)
    6
    '''
    return (datetime_obj.weekday() + 1) % 7

def julian(datetime_obj):
    '''
    Currently, this produces incorrect julian dates for dates less than
    the Gregorian switch-over.

    >>> dt = datetime(2299, 12, 31, 23, 59, 59); julian(dt) - 2561117.49999
    0.0
    >>> dt = datetime(2199, 12, 31, 23, 59, 59); julian(dt) - 2524593.49999
    0.0
    >>> dt = datetime(2099, 12, 31, 23, 59, 59); julian(dt) - 2488069.49999
    0.0
    >>> dt = datetime(1999, 12, 31, 23, 59, 59); julian(dt) - 2451544.49999
    0.0
    >>> dt = datetime(1899, 12, 31, 23, 59, 59); julian(dt) - 2415020.49999
    0.0
    >>> dt = datetime(1799, 12, 31, 23, 59, 59); julian(dt) - 2378496.49999
    0.0
    >>> dt = datetime(1699, 12, 31, 23, 59, 59); julian(dt) - 2341972.49999
    0.0
    >>> dt = datetime(1599, 12, 31, 23, 59, 59); julian(dt) - 2305447.49999
    0.0
    >>> dt = datetime(1499, 12, 31, 23, 59, 59)
    >>> dt = datetime(1399, 12, 31, 23, 59, 59)
    >>> dt = datetime(1299, 12, 31, 23, 59, 59)
    '''
    tz = datetime_obj.tzinfo
    if tz and tz != GMT:
        datetime_obj = ut(datetime_obj)
    y, m, d, h, mn, s, nil, nil, tz = (datetime_obj.timetuple())
    d = float(d + h/24. + mn/60. + s/60.**2)
    if m < 3:
        m += 12
        y -= 1
    # my day correction to bring it into accordance with the USNO Julian Calculator
    d -= 0.9580655555
    #julian = d + (153*m - 457)/5 + int(365.25 *y) - int(y * .01 ) + int(y * .0025 ) + 1721118.5
    if datetime_obj < datetime(GREGORIAN_YEAR, GREGORIAN_MONTH, GREGORIAN_DAY):
        b = 0
    else:
        b = int(y / 4) - int(y / 100) + int(y / 400)
    if y < 0:
        c = int((365.25 * y) - .75)
    else:
        c = int(365.25) * y
    julian = d + int((153 * m - 457)/5) + b + c + 1721118.5
    return float(julian)

def julianToDegrees(jd):
    '''
    >>>
    '''

def meanSiderealTime(datetime_obj):
    '''
    Returns the Mean Sidereal Time in degrees:

    >>> dt = datetime(1994, 6, 16, 18, 0, 0)
    >>> degreesToTime(meanSiderealTime(dt))
    datetime.time(11, 39, 5)

    # vernal equinox in 2006
    >>> dt = datetime(2006, 3, 20, 13, 26, 0)
    >>> degreesToTime(meanSiderealTime(dt))
    datetime.time(11, 17, 23)

    # hmmm...
    '''
    jd = julian(datetime_obj)
    d = jd - (julian(datetime(2000,1,1)) + 0.5)
    #d = -2024.75000
    mst = GMST_A + (GMST_B * d) + (GMST_C * (d/CENTURY_DAYS)**2)
    return mst % 360

greenwhichMeanSiderealTime = meanSiderealTime
GMST = meanSiderealTime

def localSiderealTime(datetime_obj, longitude):
    '''
    >>> dt = datetime(1994, 6, 16, 18, 0, 0)
    >>> longitude = -105.09 # loveland, co
    >>> localSiderealTime(dt, longitude)
    >>> degreesToTime(localSiderealTime(dt, longitude))
    datetime.time(4, 38, 43)
    '''
    gmst = meanSiderealTime(datetime_obj)
    return (gmst + longitude) % 360

#localMeanSiderealTime = localSiderealTime
#LMST = localSiderealTime

def equationOfTheEquinoxes(datetime_obj):
    jd = julian(datetime_obj)
    d = jd - (julian(datetime(2000,1,1)) + 0.5)
    c = d/CENTURY_DAYS
    Om = (125.04452 - 0.052954 * c) % 360
    L = (280.4665 + 0.98565 * c) % 360
    epsilon = (23.4393 - 0.0000004 * c) % 360
    delta_psi = -0.000319 * sin(Om) - 0.000024 * sin(2*L)
    return delta_psi * cos(epsilon)

def apparentSideralTime(datetime_obj):
    '''
    >>> dt = datetime(1994, 6, 16, 18, 0, 0)
    >>> apparentSideralTime(dt)
    174.77457329366436

    >>> dt = datetime.now()
    >>> apparentSideralTime(dt)
    '''
    jd = julian(datetime_obj)
    d = jd - (julian(datetime(2000,1,1)) + 0.5)
    c = d/CENTURY_DAYS
    Om = (125.04452 - 1934.136261 * c) % 360
    L = (280.4665 + 36000.7698 * c) % 360
    L1 = (218.3165 + 481267.8813 * c) % 360
    e = (23.4393 - 0.0000004 * c) % 360
    dp = -17.2 * sin(Om) - 1.32 * sin(2 * L) - 0.23 * sin(2 * L1) + 0.21 * sin(2 * Om)
    de = 9.2 * cos(Om) + 0.57 * cos(2 * L) + 0.1 * cos(2 * L1) - 0.09 * cos(2 * Om)
    gmst = meanSiderealTime(datetime_obj)
    correction = dp * cos(e) / 3600
    return gmst + correction

def localApparentSiderealTime(datetime_obj, longitude):
    '''
    '''
    gast = apparentSideralTime(datetime_obj)
    return (gast + longitude) % 360

def currentLAST(longitude):
    '''
    >>> currentLAST(-105.09)
    >>> degreesToTime(currentLAST(-105.09))
    '''
    return localApparentSiderealTime(datetime.now(), longitude)

def _test():
    from doctest import testmod
    import timetime
    testmod(timetime)

if __name__ == '__main__':
    _test()
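# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a small illustration of the
# sidereal-time helpers above, mirroring the module's own doctests. The
# longitude is just the Loveland, CO value used in the localSiderealTime
# doctest; call _demo() by hand if you want to see the output. It assumes
# pytz is installed, as the module itself already requires.

def _demo():
    dt = datetime(1994, 6, 16, 18, 0, 0)
    longitude = -105.09  # approximate longitude of Loveland, CO (doctest value)
    print('Julian date:        ', julian(dt))
    print('GMST (degrees):     ', meanSiderealTime(dt))
    print('LMST as clock time: ', degreesToTime(localSiderealTime(dt, longitude)))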
PypiClean
/Altair%20Smartworks%20SDK-0.0.1.tar.gz/Altair Smartworks SDK-0.0.1/openapi_client/model/thing_create_response_credentials_mqtt.py
import re  # noqa: F401
import sys  # noqa: F401

from openapi_client.model_utils import (  # noqa: F401
    ApiTypeError,
    ModelComposed,
    ModelNormal,
    ModelSimple,
    cached_property,
    change_keys_js_to_python,
    convert_js_args_to_python_args,
    date,
    datetime,
    file_type,
    none_type,
    validate_get_composed_info,
)


def lazy_import():
    from openapi_client.model.thing_create_response_credentials_mqtt_data import ThingCreateResponseCredentialsMqttData
    from openapi_client.model.thing_create_response_credentials_mqtt_thing import ThingCreateResponseCredentialsMqttThing
    globals()['ThingCreateResponseCredentialsMqttData'] = ThingCreateResponseCredentialsMqttData
    globals()['ThingCreateResponseCredentialsMqttThing'] = ThingCreateResponseCredentialsMqttThing


class ThingCreateResponseCredentialsMqtt(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'data': (ThingCreateResponseCredentialsMqttData,),  # noqa: E501
            'thing': (ThingCreateResponseCredentialsMqttThing,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'data': 'data',  # noqa: E501
        'thing': 'thing',  # noqa: E501
    }

    _composed_schemas = {}

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ThingCreateResponseCredentialsMqtt - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of classes that
                                we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is traveled through is
                                added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            data (ThingCreateResponseCredentialsMqttData): [optional]  # noqa: E501
            thing (ThingCreateResponseCredentialsMqttThing): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
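# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated file): constructing this model by
# hand. Both `data` and `thing` are [optional] per the docstring above, so a
# bare constructor call is valid. This assumes the generated `openapi_client`
# package (whose model_utils the file already imports) is installed; in normal
# use, instances come back from the API client's response deserializer instead.

if __name__ == "__main__":
    # An empty but valid model instance; fields would normally be populated
    # from the Thing-creation response returned by the API.
    creds = ThingCreateResponseCredentialsMqtt()
    print(creds)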
PypiClean
/GB2260-v2-0.2.1.tar.gz/GB2260-v2-0.2.1/gb2260_v2/data/curated/revision_201509.py
from __future__ import unicode_literals name = '201509' division_schema = { '110000': '北京市', '110100': '市辖区', '110101': '东城区', '110102': '西城区', '110105': '朝阳区', '110106': '丰台区', '110107': '石景山区', '110108': '海淀区', '110109': '门头沟区', '110111': '房山区', '110112': '通州区', '110113': '顺义区', '110114': '昌平区', '110115': '大兴区', '110116': '怀柔区', '110117': '平谷区', '110200': '县', '110228': '密云县', '110229': '延庆县', '120000': '天津市', '120100': '市辖区', '120101': '和平区', '120102': '河东区', '120103': '河西区', '120104': '南开区', '120105': '河北区', '120106': '红桥区', '120110': '东丽区', '120111': '西青区', '120112': '津南区', '120113': '北辰区', '120114': '武清区', '120115': '宝坻区', '120116': '滨海新区', '120117': '宁河区', '120118': '静海区', '120200': '县', '120225': '蓟县', '130000': '河北省', '130100': '石家庄市', '130101': '市辖区', '130102': '长安区', '130104': '桥西区', '130105': '新华区', '130107': '井陉矿区', '130108': '裕华区', '130109': '藁城区', '130110': '鹿泉区', '130111': '栾城区', '130121': '井陉县', '130123': '正定县', '130125': '行唐县', '130126': '灵寿县', '130127': '高邑县', '130128': '深泽县', '130129': '赞皇县', '130130': '无极县', '130131': '平山县', '130132': '元氏县', '130133': '赵县', '130183': '晋州市', '130184': '新乐市', '130200': '唐山市', '130201': '市辖区', '130202': '路南区', '130203': '路北区', '130204': '古冶区', '130205': '开平区', '130207': '丰南区', '130208': '丰润区', '130209': '曹妃甸区', '130223': '滦县', '130224': '滦南县', '130225': '乐亭县', '130227': '迁西县', '130229': '玉田县', '130281': '遵化市', '130283': '迁安市', '130300': '秦皇岛市', '130301': '市辖区', '130302': '海港区', '130303': '山海关区', '130304': '北戴河区', '130306': '抚宁区', '130321': '青龙满族自治县', '130322': '昌黎县', '130324': '卢龙县', '130400': '邯郸市', '130401': '市辖区', '130402': '邯山区', '130403': '丛台区', '130404': '复兴区', '130406': '峰峰矿区', '130421': '邯郸县', '130423': '临漳县', '130424': '成安县', '130425': '大名县', '130426': '涉县', '130427': '磁县', '130428': '肥乡县', '130429': '永年县', '130430': '邱县', '130431': '鸡泽县', '130432': '广平县', '130433': '馆陶县', '130434': '魏县', '130435': '曲周县', '130481': '武安市', '130500': '邢台市', '130501': '市辖区', '130502': '桥东区', '130503': '桥西区', '130521': '邢台县', '130522': '临城县', '130523': '内丘县', '130524': '柏乡县', '130525': '隆尧县', '130526': '任县', '130527': '南和县', '130528': '宁晋县', '130529': '巨鹿县', '130530': '新河县', '130531': '广宗县', '130532': '平乡县', '130533': '威县', '130534': '清河县', '130535': '临西县', '130581': '南宫市', '130582': '沙河市', '130600': '保定市', '130601': '市辖区', '130602': '竞秀区', '130606': '莲池区', '130607': '满城区', '130608': '清苑区', '130609': '徐水区', '130623': '涞水县', '130624': '阜平县', '130626': '定兴县', '130627': '唐县', '130628': '高阳县', '130629': '容城县', '130630': '涞源县', '130631': '望都县', '130632': '安新县', '130633': '易县', '130634': '曲阳县', '130635': '蠡县', '130636': '顺平县', '130637': '博野县', '130638': '雄县', '130681': '涿州市', '130683': '安国市', '130684': '高碑店市', '130700': '张家口市', '130701': '市辖区', '130702': '桥东区', '130703': '桥西区', '130705': '宣化区', '130706': '下花园区', '130721': '宣化县', '130722': '张北县', '130723': '康保县', '130724': '沽源县', '130725': '尚义县', '130726': '蔚县', '130727': '阳原县', '130728': '怀安县', '130729': '万全县', '130730': '怀来县', '130731': '涿鹿县', '130732': '赤城县', '130733': '崇礼县', '130800': '承德市', '130801': '市辖区', '130802': '双桥区', '130803': '双滦区', '130804': '鹰手营子矿区', '130821': '承德县', '130822': '兴隆县', '130823': '平泉县', '130824': '滦平县', '130825': '隆化县', '130826': '丰宁满族自治县', '130827': '宽城满族自治县', '130828': '围场满族蒙古族自治县', '130900': '沧州市', '130901': '市辖区', '130902': '新华区', '130903': '运河区', '130921': '沧县', '130922': '青县', '130923': '东光县', '130924': '海兴县', '130925': '盐山县', '130926': '肃宁县', '130927': '南皮县', '130928': '吴桥县', '130929': '献县', '130930': '孟村回族自治县', '130981': '泊头市', '130982': '任丘市', '130983': '黄骅市', '130984': 
'河间市', '131000': '廊坊市', '131001': '市辖区', '131002': '安次区', '131003': '广阳区', '131022': '固安县', '131023': '永清县', '131024': '香河县', '131025': '大城县', '131026': '文安县', '131028': '大厂回族自治县', '131081': '霸州市', '131082': '三河市', '131100': '衡水市', '131101': '市辖区', '131102': '桃城区', '131121': '枣强县', '131122': '武邑县', '131123': '武强县', '131124': '饶阳县', '131125': '安平县', '131126': '故城县', '131127': '景县', '131128': '阜城县', '131181': '冀州市', '131182': '深州市', '139000': '省直辖县级行政区划', '139001': '定州市', '139002': '辛集市', '140000': '山西省', '140100': '太原市', '140101': '市辖区', '140105': '小店区', '140106': '迎泽区', '140107': '杏花岭区', '140108': '尖草坪区', '140109': '万柏林区', '140110': '晋源区', '140121': '清徐县', '140122': '阳曲县', '140123': '娄烦县', '140181': '古交市', '140200': '大同市', '140201': '市辖区', '140202': '城区', '140203': '矿区', '140211': '南郊区', '140212': '新荣区', '140221': '阳高县', '140222': '天镇县', '140223': '广灵县', '140224': '灵丘县', '140225': '浑源县', '140226': '左云县', '140227': '大同县', '140300': '阳泉市', '140301': '市辖区', '140302': '城区', '140303': '矿区', '140311': '郊区', '140321': '平定县', '140322': '盂县', '140400': '长治市', '140401': '市辖区', '140402': '城区', '140411': '郊区', '140421': '长治县', '140423': '襄垣县', '140424': '屯留县', '140425': '平顺县', '140426': '黎城县', '140427': '壶关县', '140428': '长子县', '140429': '武乡县', '140430': '沁县', '140431': '沁源县', '140481': '潞城市', '140500': '晋城市', '140501': '市辖区', '140502': '城区', '140521': '沁水县', '140522': '阳城县', '140524': '陵川县', '140525': '泽州县', '140581': '高平市', '140600': '朔州市', '140601': '市辖区', '140602': '朔城区', '140603': '平鲁区', '140621': '山阴县', '140622': '应县', '140623': '右玉县', '140624': '怀仁县', '140700': '晋中市', '140701': '市辖区', '140702': '榆次区', '140721': '榆社县', '140722': '左权县', '140723': '和顺县', '140724': '昔阳县', '140725': '寿阳县', '140726': '太谷县', '140727': '祁县', '140728': '平遥县', '140729': '灵石县', '140781': '介休市', '140800': '运城市', '140801': '市辖区', '140802': '盐湖区', '140821': '临猗县', '140822': '万荣县', '140823': '闻喜县', '140824': '稷山县', '140825': '新绛县', '140826': '绛县', '140827': '垣曲县', '140828': '夏县', '140829': '平陆县', '140830': '芮城县', '140881': '永济市', '140882': '河津市', '140900': '忻州市', '140901': '市辖区', '140902': '忻府区', '140921': '定襄县', '140922': '五台县', '140923': '代县', '140924': '繁峙县', '140925': '宁武县', '140926': '静乐县', '140927': '神池县', '140928': '五寨县', '140929': '岢岚县', '140930': '河曲县', '140931': '保德县', '140932': '偏关县', '140981': '原平市', '141000': '临汾市', '141001': '市辖区', '141002': '尧都区', '141021': '曲沃县', '141022': '翼城县', '141023': '襄汾县', '141024': '洪洞县', '141025': '古县', '141026': '安泽县', '141027': '浮山县', '141028': '吉县', '141029': '乡宁县', '141030': '大宁县', '141031': '隰县', '141032': '永和县', '141033': '蒲县', '141034': '汾西县', '141081': '侯马市', '141082': '霍州市', '141100': '吕梁市', '141101': '市辖区', '141102': '离石区', '141121': '文水县', '141122': '交城县', '141123': '兴县', '141124': '临县', '141125': '柳林县', '141126': '石楼县', '141127': '岚县', '141128': '方山县', '141129': '中阳县', '141130': '交口县', '141181': '孝义市', '141182': '汾阳市', '150000': '内蒙古自治区', '150100': '呼和浩特市', '150101': '市辖区', '150102': '新城区', '150103': '回民区', '150104': '玉泉区', '150105': '赛罕区', '150121': '土默特左旗', '150122': '托克托县', '150123': '和林格尔县', '150124': '清水河县', '150125': '武川县', '150200': '包头市', '150201': '市辖区', '150202': '东河区', '150203': '昆都仑区', '150204': '青山区', '150205': '石拐区', '150206': '白云鄂博矿区', '150207': '九原区', '150221': '土默特右旗', '150222': '固阳县', '150223': '达尔罕茂明安联合旗', '150300': '乌海市', '150301': '市辖区', '150302': '海勃湾区', '150303': '海南区', '150304': '乌达区', '150400': '赤峰市', '150401': '市辖区', '150402': '红山区', '150403': '元宝山区', '150404': '松山区', '150421': '阿鲁科尔沁旗', '150422': '巴林左旗', '150423': '巴林右旗', '150424': '林西县', 
'150425': '克什克腾旗', '150426': '翁牛特旗', '150428': '喀喇沁旗', '150429': '宁城县', '150430': '敖汉旗', '150500': '通辽市', '150501': '市辖区', '150502': '科尔沁区', '150521': '科尔沁左翼中旗', '150522': '科尔沁左翼后旗', '150523': '开鲁县', '150524': '库伦旗', '150525': '奈曼旗', '150526': '扎鲁特旗', '150581': '霍林郭勒市', '150600': '鄂尔多斯市', '150601': '市辖区', '150602': '东胜区', '150621': '达拉特旗', '150622': '准格尔旗', '150623': '鄂托克前旗', '150624': '鄂托克旗', '150625': '杭锦旗', '150626': '乌审旗', '150627': '伊金霍洛旗', '150700': '呼伦贝尔市', '150701': '市辖区', '150702': '海拉尔区', '150703': '扎赉诺尔区', '150721': '阿荣旗', '150722': '莫力达瓦达斡尔族自治旗', '150723': '鄂伦春自治旗', '150724': '鄂温克族自治旗', '150725': '陈巴尔虎旗', '150726': '新巴尔虎左旗', '150727': '新巴尔虎右旗', '150781': '满洲里市', '150782': '牙克石市', '150783': '扎兰屯市', '150784': '额尔古纳市', '150785': '根河市', '150800': '巴彦淖尔市', '150801': '市辖区', '150802': '临河区', '150821': '五原县', '150822': '磴口县', '150823': '乌拉特前旗', '150824': '乌拉特中旗', '150825': '乌拉特后旗', '150826': '杭锦后旗', '150900': '乌兰察布市', '150901': '市辖区', '150902': '集宁区', '150921': '卓资县', '150922': '化德县', '150923': '商都县', '150924': '兴和县', '150925': '凉城县', '150926': '察哈尔右翼前旗', '150927': '察哈尔右翼中旗', '150928': '察哈尔右翼后旗', '150929': '四子王旗', '150981': '丰镇市', '152200': '兴安盟', '152201': '乌兰浩特市', '152202': '阿尔山市', '152221': '科尔沁右翼前旗', '152222': '科尔沁右翼中旗', '152223': '扎赉特旗', '152224': '突泉县', '152500': '锡林郭勒盟', '152501': '二连浩特市', '152502': '锡林浩特市', '152522': '阿巴嘎旗', '152523': '苏尼特左旗', '152524': '苏尼特右旗', '152525': '东乌珠穆沁旗', '152526': '西乌珠穆沁旗', '152527': '太仆寺旗', '152528': '镶黄旗', '152529': '正镶白旗', '152530': '正蓝旗', '152531': '多伦县', '152900': '阿拉善盟', '152921': '阿拉善左旗', '152922': '阿拉善右旗', '152923': '额济纳旗', '210000': '辽宁省', '210100': '沈阳市', '210101': '市辖区', '210102': '和平区', '210103': '沈河区', '210104': '大东区', '210105': '皇姑区', '210106': '铁西区', '210111': '苏家屯区', '210112': '浑南区', '210113': '沈北新区', '210114': '于洪区', '210122': '辽中县', '210123': '康平县', '210124': '法库县', '210181': '新民市', '210200': '大连市', '210201': '市辖区', '210202': '中山区', '210203': '西岗区', '210204': '沙河口区', '210211': '甘井子区', '210212': '旅顺口区', '210213': '金州区', '210224': '长海县', '210281': '瓦房店市', '210282': '普兰店市', '210283': '庄河市', '210300': '鞍山市', '210301': '市辖区', '210302': '铁东区', '210303': '铁西区', '210304': '立山区', '210311': '千山区', '210321': '台安县', '210323': '岫岩满族自治县', '210381': '海城市', '210400': '抚顺市', '210401': '市辖区', '210402': '新抚区', '210403': '东洲区', '210404': '望花区', '210411': '顺城区', '210421': '抚顺县', '210422': '新宾满族自治县', '210423': '清原满族自治县', '210500': '本溪市', '210501': '市辖区', '210502': '平山区', '210503': '溪湖区', '210504': '明山区', '210505': '南芬区', '210521': '本溪满族自治县', '210522': '桓仁满族自治县', '210600': '丹东市', '210601': '市辖区', '210602': '元宝区', '210603': '振兴区', '210604': '振安区', '210624': '宽甸满族自治县', '210681': '东港市', '210682': '凤城市', '210700': '锦州市', '210701': '市辖区', '210702': '古塔区', '210703': '凌河区', '210711': '太和区', '210726': '黑山县', '210727': '义县', '210781': '凌海市', '210782': '北镇市', '210800': '营口市', '210801': '市辖区', '210802': '站前区', '210803': '西市区', '210804': '鲅鱼圈区', '210811': '老边区', '210881': '盖州市', '210882': '大石桥市', '210900': '阜新市', '210901': '市辖区', '210902': '海州区', '210903': '新邱区', '210904': '太平区', '210905': '清河门区', '210911': '细河区', '210921': '阜新蒙古族自治县', '210922': '彰武县', '211000': '辽阳市', '211001': '市辖区', '211002': '白塔区', '211003': '文圣区', '211004': '宏伟区', '211005': '弓长岭区', '211011': '太子河区', '211021': '辽阳县', '211081': '灯塔市', '211100': '盘锦市', '211101': '市辖区', '211102': '双台子区', '211103': '兴隆台区', '211121': '大洼县', '211122': '盘山县', '211200': '铁岭市', '211201': '市辖区', '211202': '银州区', '211204': '清河区', '211221': '铁岭县', '211223': '西丰县', '211224': '昌图县', '211281': '调兵山市', '211282': '开原市', '211300': 
'朝阳市', '211301': '市辖区', '211302': '双塔区', '211303': '龙城区', '211321': '朝阳县', '211322': '建平县', '211324': '喀喇沁左翼蒙古族自治县', '211381': '北票市', '211382': '凌源市', '211400': '葫芦岛市', '211401': '市辖区', '211402': '连山区', '211403': '龙港区', '211404': '南票区', '211421': '绥中县', '211422': '建昌县', '211481': '兴城市', '220000': '吉林省', '220100': '长春市', '220101': '市辖区', '220102': '南关区', '220103': '宽城区', '220104': '朝阳区', '220105': '二道区', '220106': '绿园区', '220112': '双阳区', '220113': '九台区', '220122': '农安县', '220182': '榆树市', '220183': '德惠市', '220200': '吉林市', '220201': '市辖区', '220202': '昌邑区', '220203': '龙潭区', '220204': '船营区', '220211': '丰满区', '220221': '永吉县', '220281': '蛟河市', '220282': '桦甸市', '220283': '舒兰市', '220284': '磐石市', '220300': '四平市', '220301': '市辖区', '220302': '铁西区', '220303': '铁东区', '220322': '梨树县', '220323': '伊通满族自治县', '220381': '公主岭市', '220382': '双辽市', '220400': '辽源市', '220401': '市辖区', '220402': '龙山区', '220403': '西安区', '220421': '东丰县', '220422': '东辽县', '220500': '通化市', '220501': '市辖区', '220502': '东昌区', '220503': '二道江区', '220521': '通化县', '220523': '辉南县', '220524': '柳河县', '220581': '梅河口市', '220582': '集安市', '220600': '白山市', '220601': '市辖区', '220602': '浑江区', '220605': '江源区', '220621': '抚松县', '220622': '靖宇县', '220623': '长白朝鲜族自治县', '220681': '临江市', '220700': '松原市', '220701': '市辖区', '220702': '宁江区', '220721': '前郭尔罗斯蒙古族自治县', '220722': '长岭县', '220723': '乾安县', '220781': '扶余市', '220800': '白城市', '220801': '市辖区', '220802': '洮北区', '220821': '镇赉县', '220822': '通榆县', '220881': '洮南市', '220882': '大安市', '222400': '延边朝鲜族自治州', '222401': '延吉市', '222402': '图们市', '222403': '敦化市', '222404': '珲春市', '222405': '龙井市', '222406': '和龙市', '222424': '汪清县', '222426': '安图县', '230000': '黑龙江省', '230100': '哈尔滨市', '230101': '市辖区', '230102': '道里区', '230103': '南岗区', '230104': '道外区', '230108': '平房区', '230109': '松北区', '230110': '香坊区', '230111': '呼兰区', '230112': '阿城区', '230113': '双城区', '230123': '依兰县', '230124': '方正县', '230125': '宾县', '230126': '巴彦县', '230127': '木兰县', '230128': '通河县', '230129': '延寿县', '230183': '尚志市', '230184': '五常市', '230200': '齐齐哈尔市', '230201': '市辖区', '230202': '龙沙区', '230203': '建华区', '230204': '铁锋区', '230205': '昂昂溪区', '230206': '富拉尔基区', '230207': '碾子山区', '230208': '梅里斯达斡尔族区', '230221': '龙江县', '230223': '依安县', '230224': '泰来县', '230225': '甘南县', '230227': '富裕县', '230229': '克山县', '230230': '克东县', '230231': '拜泉县', '230281': '讷河市', '230300': '鸡西市', '230301': '市辖区', '230302': '鸡冠区', '230303': '恒山区', '230304': '滴道区', '230305': '梨树区', '230306': '城子河区', '230307': '麻山区', '230321': '鸡东县', '230381': '虎林市', '230382': '密山市', '230400': '鹤岗市', '230401': '市辖区', '230402': '向阳区', '230403': '工农区', '230404': '南山区', '230405': '兴安区', '230406': '东山区', '230407': '兴山区', '230421': '萝北县', '230422': '绥滨县', '230500': '双鸭山市', '230501': '市辖区', '230502': '尖山区', '230503': '岭东区', '230505': '四方台区', '230506': '宝山区', '230521': '集贤县', '230522': '友谊县', '230523': '宝清县', '230524': '饶河县', '230600': '大庆市', '230601': '市辖区', '230602': '萨尔图区', '230603': '龙凤区', '230604': '让胡路区', '230605': '红岗区', '230606': '大同区', '230621': '肇州县', '230622': '肇源县', '230623': '林甸县', '230624': '杜尔伯特蒙古族自治县', '230700': '伊春市', '230701': '市辖区', '230702': '伊春区', '230703': '南岔区', '230704': '友好区', '230705': '西林区', '230706': '翠峦区', '230707': '新青区', '230708': '美溪区', '230709': '金山屯区', '230710': '五营区', '230711': '乌马河区', '230712': '汤旺河区', '230713': '带岭区', '230714': '乌伊岭区', '230715': '红星区', '230716': '上甘岭区', '230722': '嘉荫县', '230781': '铁力市', '230800': '佳木斯市', '230801': '市辖区', '230803': '向阳区', '230804': '前进区', '230805': '东风区', '230811': '郊区', '230822': '桦南县', '230826': '桦川县', '230828': '汤原县', '230833': '抚远县', '230881': '同江市', 
'230882': '富锦市', '230900': '七台河市', '230901': '市辖区', '230902': '新兴区', '230903': '桃山区', '230904': '茄子河区', '230921': '勃利县', '231000': '牡丹江市', '231001': '市辖区', '231002': '东安区', '231003': '阳明区', '231004': '爱民区', '231005': '西安区', '231024': '东宁县', '231025': '林口县', '231081': '绥芬河市', '231083': '海林市', '231084': '宁安市', '231085': '穆棱市', '231100': '黑河市', '231101': '市辖区', '231102': '爱辉区', '231121': '嫩江县', '231123': '逊克县', '231124': '孙吴县', '231181': '北安市', '231182': '五大连池市', '231200': '绥化市', '231201': '市辖区', '231202': '北林区', '231221': '望奎县', '231222': '兰西县', '231223': '青冈县', '231224': '庆安县', '231225': '明水县', '231226': '绥棱县', '231281': '安达市', '231282': '肇东市', '231283': '海伦市', '232700': '大兴安岭地区', '232721': '呼玛县', '232722': '塔河县', '232723': '漠河县', '310000': '上海市', '310100': '市辖区', '310101': '黄浦区', '310104': '徐汇区', '310105': '长宁区', '310106': '静安区', '310107': '普陀区', '310108': '闸北区', '310109': '虹口区', '310110': '杨浦区', '310112': '闵行区', '310113': '宝山区', '310114': '嘉定区', '310115': '浦东新区', '310116': '金山区', '310117': '松江区', '310118': '青浦区', '310120': '奉贤区', '310200': '县', '310230': '崇明县', '320000': '江苏省', '320100': '南京市', '320101': '市辖区', '320102': '玄武区', '320104': '秦淮区', '320105': '建邺区', '320106': '鼓楼区', '320111': '浦口区', '320113': '栖霞区', '320114': '雨花台区', '320115': '江宁区', '320116': '六合区', '320117': '溧水区', '320118': '高淳区', '320200': '无锡市', '320201': '市辖区', '320202': '崇安区', '320203': '南长区', '320204': '北塘区', '320205': '锡山区', '320206': '惠山区', '320211': '滨湖区', '320281': '江阴市', '320282': '宜兴市', '320300': '徐州市', '320301': '市辖区', '320302': '鼓楼区', '320303': '云龙区', '320305': '贾汪区', '320311': '泉山区', '320312': '铜山区', '320321': '丰县', '320322': '沛县', '320324': '睢宁县', '320381': '新沂市', '320382': '邳州市', '320400': '常州市', '320401': '市辖区', '320402': '天宁区', '320404': '钟楼区', '320411': '新北区', '320412': '武进区', '320413': '金坛区', '320481': '溧阳市', '320500': '苏州市', '320501': '市辖区', '320505': '虎丘区', '320506': '吴中区', '320507': '相城区', '320508': '姑苏区', '320509': '吴江区', '320581': '常熟市', '320582': '张家港市', '320583': '昆山市', '320585': '太仓市', '320600': '南通市', '320601': '市辖区', '320602': '崇川区', '320611': '港闸区', '320612': '通州区', '320621': '海安县', '320623': '如东县', '320681': '启东市', '320682': '如皋市', '320684': '海门市', '320700': '连云港市', '320701': '市辖区', '320703': '连云区', '320706': '海州区', '320707': '赣榆区', '320722': '东海县', '320723': '灌云县', '320724': '灌南县', '320800': '淮安市', '320801': '市辖区', '320802': '清河区', '320803': '淮安区', '320804': '淮阴区', '320811': '清浦区', '320826': '涟水县', '320829': '洪泽县', '320830': '盱眙县', '320831': '金湖县', '320900': '盐城市', '320901': '市辖区', '320902': '亭湖区', '320903': '盐都区', '320904': '大丰区', '320921': '响水县', '320922': '滨海县', '320923': '阜宁县', '320924': '射阳县', '320925': '建湖县', '320981': '东台市', '321000': '扬州市', '321001': '市辖区', '321002': '广陵区', '321003': '邗江区', '321012': '江都区', '321023': '宝应县', '321081': '仪征市', '321084': '高邮市', '321100': '镇江市', '321101': '市辖区', '321102': '京口区', '321111': '润州区', '321112': '丹徒区', '321181': '丹阳市', '321182': '扬中市', '321183': '句容市', '321200': '泰州市', '321201': '市辖区', '321202': '海陵区', '321203': '高港区', '321204': '姜堰区', '321281': '兴化市', '321282': '靖江市', '321283': '泰兴市', '321300': '宿迁市', '321301': '市辖区', '321302': '宿城区', '321311': '宿豫区', '321322': '沭阳县', '321323': '泗阳县', '321324': '泗洪县', '330000': '浙江省', '330100': '杭州市', '330101': '市辖区', '330102': '上城区', '330103': '下城区', '330104': '江干区', '330105': '拱墅区', '330106': '西湖区', '330108': '滨江区', '330109': '萧山区', '330110': '余杭区', '330111': '富阳区', '330122': '桐庐县', '330127': '淳安县', '330182': '建德市', '330185': '临安市', '330200': '宁波市', '330201': '市辖区', '330203': '海曙区', '330204': '江东区', '330205': 
'江北区', '330206': '北仑区', '330211': '镇海区', '330212': '鄞州区', '330225': '象山县', '330226': '宁海县', '330281': '余姚市', '330282': '慈溪市', '330283': '奉化市', '330300': '温州市', '330301': '市辖区', '330302': '鹿城区', '330303': '龙湾区', '330304': '瓯海区', '330305': '洞头区', '330324': '永嘉县', '330326': '平阳县', '330327': '苍南县', '330328': '文成县', '330329': '泰顺县', '330381': '瑞安市', '330382': '乐清市', '330400': '嘉兴市', '330401': '市辖区', '330402': '南湖区', '330411': '秀洲区', '330421': '嘉善县', '330424': '海盐县', '330481': '海宁市', '330482': '平湖市', '330483': '桐乡市', '330500': '湖州市', '330501': '市辖区', '330502': '吴兴区', '330503': '南浔区', '330521': '德清县', '330522': '长兴县', '330523': '安吉县', '330600': '绍兴市', '330601': '市辖区', '330602': '越城区', '330603': '柯桥区', '330604': '上虞区', '330624': '新昌县', '330681': '诸暨市', '330683': '嵊州市', '330700': '金华市', '330701': '市辖区', '330702': '婺城区', '330703': '金东区', '330723': '武义县', '330726': '浦江县', '330727': '磐安县', '330781': '兰溪市', '330782': '义乌市', '330783': '东阳市', '330784': '永康市', '330800': '衢州市', '330801': '市辖区', '330802': '柯城区', '330803': '衢江区', '330822': '常山县', '330824': '开化县', '330825': '龙游县', '330881': '江山市', '330900': '舟山市', '330901': '市辖区', '330902': '定海区', '330903': '普陀区', '330921': '岱山县', '330922': '嵊泗县', '331000': '台州市', '331001': '市辖区', '331002': '椒江区', '331003': '黄岩区', '331004': '路桥区', '331021': '玉环县', '331022': '三门县', '331023': '天台县', '331024': '仙居县', '331081': '温岭市', '331082': '临海市', '331100': '丽水市', '331101': '市辖区', '331102': '莲都区', '331121': '青田县', '331122': '缙云县', '331123': '遂昌县', '331124': '松阳县', '331125': '云和县', '331126': '庆元县', '331127': '景宁畲族自治县', '331181': '龙泉市', '340000': '安徽省', '340100': '合肥市', '340101': '市辖区', '340102': '瑶海区', '340103': '庐阳区', '340104': '蜀山区', '340111': '包河区', '340121': '长丰县', '340122': '肥东县', '340123': '肥西县', '340124': '庐江县', '340181': '巢湖市', '340200': '芜湖市', '340201': '市辖区', '340202': '镜湖区', '340203': '弋江区', '340207': '鸠江区', '340208': '三山区', '340221': '芜湖县', '340222': '繁昌县', '340223': '南陵县', '340225': '无为县', '340300': '蚌埠市', '340301': '市辖区', '340302': '龙子湖区', '340303': '蚌山区', '340304': '禹会区', '340311': '淮上区', '340321': '怀远县', '340322': '五河县', '340323': '固镇县', '340400': '淮南市', '340401': '市辖区', '340402': '大通区', '340403': '田家庵区', '340404': '谢家集区', '340405': '八公山区', '340406': '潘集区', '340421': '凤台县', '340500': '马鞍山市', '340501': '市辖区', '340503': '花山区', '340504': '雨山区', '340506': '博望区', '340521': '当涂县', '340522': '含山县', '340523': '和县', '340600': '淮北市', '340601': '市辖区', '340602': '杜集区', '340603': '相山区', '340604': '烈山区', '340621': '濉溪县', '340700': '铜陵市', '340701': '市辖区', '340702': '铜官山区', '340703': '狮子山区', '340711': '郊区', '340721': '铜陵县', '340800': '安庆市', '340801': '市辖区', '340802': '迎江区', '340803': '大观区', '340811': '宜秀区', '340822': '怀宁县', '340823': '枞阳县', '340824': '潜山县', '340825': '太湖县', '340826': '宿松县', '340827': '望江县', '340828': '岳西县', '340881': '桐城市', '341000': '黄山市', '341001': '市辖区', '341002': '屯溪区', '341003': '黄山区', '341004': '徽州区', '341021': '歙县', '341022': '休宁县', '341023': '黟县', '341024': '祁门县', '341100': '滁州市', '341101': '市辖区', '341102': '琅琊区', '341103': '南谯区', '341122': '来安县', '341124': '全椒县', '341125': '定远县', '341126': '凤阳县', '341181': '天长市', '341182': '明光市', '341200': '阜阳市', '341201': '市辖区', '341202': '颍州区', '341203': '颍东区', '341204': '颍泉区', '341221': '临泉县', '341222': '太和县', '341225': '阜南县', '341226': '颍上县', '341282': '界首市', '341300': '宿州市', '341301': '市辖区', '341302': '埇桥区', '341321': '砀山县', '341322': '萧县', '341323': '灵璧县', '341324': '泗县', '341500': '六安市', '341501': '市辖区', '341502': '金安区', '341503': '裕安区', '341521': '寿县', '341522': '霍邱县', '341523': '舒城县', '341524': '金寨县', 
'341525': '霍山县', '341600': '亳州市', '341601': '市辖区', '341602': '谯城区', '341621': '涡阳县', '341622': '蒙城县', '341623': '利辛县', '341700': '池州市', '341701': '市辖区', '341702': '贵池区', '341721': '东至县', '341722': '石台县', '341723': '青阳县', '341800': '宣城市', '341801': '市辖区', '341802': '宣州区', '341821': '郎溪县', '341822': '广德县', '341823': '泾县', '341824': '绩溪县', '341825': '旌德县', '341881': '宁国市', '350000': '福建省', '350100': '福州市', '350101': '市辖区', '350102': '鼓楼区', '350103': '台江区', '350104': '仓山区', '350105': '马尾区', '350111': '晋安区', '350121': '闽侯县', '350122': '连江县', '350123': '罗源县', '350124': '闽清县', '350125': '永泰县', '350128': '平潭县', '350181': '福清市', '350182': '长乐市', '350200': '厦门市', '350201': '市辖区', '350203': '思明区', '350205': '海沧区', '350206': '湖里区', '350211': '集美区', '350212': '同安区', '350213': '翔安区', '350300': '莆田市', '350301': '市辖区', '350302': '城厢区', '350303': '涵江区', '350304': '荔城区', '350305': '秀屿区', '350322': '仙游县', '350400': '三明市', '350401': '市辖区', '350402': '梅列区', '350403': '三元区', '350421': '明溪县', '350423': '清流县', '350424': '宁化县', '350425': '大田县', '350426': '尤溪县', '350427': '沙县', '350428': '将乐县', '350429': '泰宁县', '350430': '建宁县', '350481': '永安市', '350500': '泉州市', '350501': '市辖区', '350502': '鲤城区', '350503': '丰泽区', '350504': '洛江区', '350505': '泉港区', '350521': '惠安县', '350524': '安溪县', '350525': '永春县', '350526': '德化县', '350527': '金门县', '350581': '石狮市', '350582': '晋江市', '350583': '南安市', '350600': '漳州市', '350601': '市辖区', '350602': '芗城区', '350603': '龙文区', '350622': '云霄县', '350623': '漳浦县', '350624': '诏安县', '350625': '长泰县', '350626': '东山县', '350627': '南靖县', '350628': '平和县', '350629': '华安县', '350681': '龙海市', '350700': '南平市', '350701': '市辖区', '350702': '延平区', '350703': '建阳区', '350721': '顺昌县', '350722': '浦城县', '350723': '光泽县', '350724': '松溪县', '350725': '政和县', '350781': '邵武市', '350782': '武夷山市', '350783': '建瓯市', '350800': '龙岩市', '350801': '市辖区', '350802': '新罗区', '350803': '永定区', '350821': '长汀县', '350823': '上杭县', '350824': '武平县', '350825': '连城县', '350881': '漳平市', '350900': '宁德市', '350901': '市辖区', '350902': '蕉城区', '350921': '霞浦县', '350922': '古田县', '350923': '屏南县', '350924': '寿宁县', '350925': '周宁县', '350926': '柘荣县', '350981': '福安市', '350982': '福鼎市', '360000': '江西省', '360100': '南昌市', '360101': '市辖区', '360102': '东湖区', '360103': '西湖区', '360104': '青云谱区', '360105': '湾里区', '360111': '青山湖区', '360112': '新建区', '360121': '南昌县', '360123': '安义县', '360124': '进贤县', '360200': '景德镇市', '360201': '市辖区', '360202': '昌江区', '360203': '珠山区', '360222': '浮梁县', '360281': '乐平市', '360300': '萍乡市', '360301': '市辖区', '360302': '安源区', '360313': '湘东区', '360321': '莲花县', '360322': '上栗县', '360323': '芦溪县', '360400': '九江市', '360401': '市辖区', '360402': '庐山区', '360403': '浔阳区', '360421': '九江县', '360423': '武宁县', '360424': '修水县', '360425': '永修县', '360426': '德安县', '360427': '星子县', '360428': '都昌县', '360429': '湖口县', '360430': '彭泽县', '360481': '瑞昌市', '360482': '共青城市', '360500': '新余市', '360501': '市辖区', '360502': '渝水区', '360521': '分宜县', '360600': '鹰潭市', '360601': '市辖区', '360602': '月湖区', '360622': '余江县', '360681': '贵溪市', '360700': '赣州市', '360701': '市辖区', '360702': '章贡区', '360703': '南康区', '360721': '赣县', '360722': '信丰县', '360723': '大余县', '360724': '上犹县', '360725': '崇义县', '360726': '安远县', '360727': '龙南县', '360728': '定南县', '360729': '全南县', '360730': '宁都县', '360731': '于都县', '360732': '兴国县', '360733': '会昌县', '360734': '寻乌县', '360735': '石城县', '360781': '瑞金市', '360800': '吉安市', '360801': '市辖区', '360802': '吉州区', '360803': '青原区', '360821': '吉安县', '360822': '吉水县', '360823': '峡江县', '360824': '新干县', '360825': '永丰县', '360826': '泰和县', '360827': '遂川县', '360828': '万安县', '360829': '安福县', '360830': '永新县', 
'360881': '井冈山市', '360900': '宜春市', '360901': '市辖区', '360902': '袁州区', '360921': '奉新县', '360922': '万载县', '360923': '上高县', '360924': '宜丰县', '360925': '靖安县', '360926': '铜鼓县', '360981': '丰城市', '360982': '樟树市', '360983': '高安市', '361000': '抚州市', '361001': '市辖区', '361002': '临川区', '361021': '南城县', '361022': '黎川县', '361023': '南丰县', '361024': '崇仁县', '361025': '乐安县', '361026': '宜黄县', '361027': '金溪县', '361028': '资溪县', '361029': '东乡县', '361030': '广昌县', '361100': '上饶市', '361101': '市辖区', '361102': '信州区', '361103': '广丰区', '361121': '上饶县', '361123': '玉山县', '361124': '铅山县', '361125': '横峰县', '361126': '弋阳县', '361127': '余干县', '361128': '鄱阳县', '361129': '万年县', '361130': '婺源县', '361181': '德兴市', '370000': '山东省', '370100': '济南市', '370101': '市辖区', '370102': '历下区', '370103': '市中区', '370104': '槐荫区', '370105': '天桥区', '370112': '历城区', '370113': '长清区', '370124': '平阴县', '370125': '济阳县', '370126': '商河县', '370181': '章丘市', '370200': '青岛市', '370201': '市辖区', '370202': '市南区', '370203': '市北区', '370211': '黄岛区', '370212': '崂山区', '370213': '李沧区', '370214': '城阳区', '370281': '胶州市', '370282': '即墨市', '370283': '平度市', '370285': '莱西市', '370300': '淄博市', '370301': '市辖区', '370302': '淄川区', '370303': '张店区', '370304': '博山区', '370305': '临淄区', '370306': '周村区', '370321': '桓台县', '370322': '高青县', '370323': '沂源县', '370400': '枣庄市', '370401': '市辖区', '370402': '市中区', '370403': '薛城区', '370404': '峄城区', '370405': '台儿庄区', '370406': '山亭区', '370481': '滕州市', '370500': '东营市', '370501': '市辖区', '370502': '东营区', '370503': '河口区', '370521': '垦利县', '370522': '利津县', '370523': '广饶县', '370600': '烟台市', '370601': '市辖区', '370602': '芝罘区', '370611': '福山区', '370612': '牟平区', '370613': '莱山区', '370634': '长岛县', '370681': '龙口市', '370682': '莱阳市', '370683': '莱州市', '370684': '蓬莱市', '370685': '招远市', '370686': '栖霞市', '370687': '海阳市', '370700': '潍坊市', '370701': '市辖区', '370702': '潍城区', '370703': '寒亭区', '370704': '坊子区', '370705': '奎文区', '370724': '临朐县', '370725': '昌乐县', '370781': '青州市', '370782': '诸城市', '370783': '寿光市', '370784': '安丘市', '370785': '高密市', '370786': '昌邑市', '370800': '济宁市', '370801': '市辖区', '370811': '任城区', '370812': '兖州区', '370826': '微山县', '370827': '鱼台县', '370828': '金乡县', '370829': '嘉祥县', '370830': '汶上县', '370831': '泗水县', '370832': '梁山县', '370881': '曲阜市', '370883': '邹城市', '370900': '泰安市', '370901': '市辖区', '370902': '泰山区', '370911': '岱岳区', '370921': '宁阳县', '370923': '东平县', '370982': '新泰市', '370983': '肥城市', '371000': '威海市', '371001': '市辖区', '371002': '环翠区', '371003': '文登区', '371082': '荣成市', '371083': '乳山市', '371100': '日照市', '371101': '市辖区', '371102': '东港区', '371103': '岚山区', '371121': '五莲县', '371122': '莒县', '371200': '莱芜市', '371201': '市辖区', '371202': '莱城区', '371203': '钢城区', '371300': '临沂市', '371301': '市辖区', '371302': '兰山区', '371311': '罗庄区', '371312': '河东区', '371321': '沂南县', '371322': '郯城县', '371323': '沂水县', '371324': '兰陵县', '371325': '费县', '371326': '平邑县', '371327': '莒南县', '371328': '蒙阴县', '371329': '临沭县', '371400': '德州市', '371401': '市辖区', '371402': '德城区', '371403': '陵城区', '371422': '宁津县', '371423': '庆云县', '371424': '临邑县', '371425': '齐河县', '371426': '平原县', '371427': '夏津县', '371428': '武城县', '371481': '乐陵市', '371482': '禹城市', '371500': '聊城市', '371501': '市辖区', '371502': '东昌府区', '371521': '阳谷县', '371522': '莘县', '371523': '茌平县', '371524': '东阿县', '371525': '冠县', '371526': '高唐县', '371581': '临清市', '371600': '滨州市', '371601': '市辖区', '371602': '滨城区', '371603': '沾化区', '371621': '惠民县', '371622': '阳信县', '371623': '无棣县', '371625': '博兴县', '371626': '邹平县', '371700': '菏泽市', '371701': '市辖区', '371702': '牡丹区', '371721': '曹县', '371722': '单县', '371723': '成武县', '371724': '巨野县', '371725': '郓城县', 
'371726': '鄄城县', '371727': '定陶县', '371728': '东明县', '410000': '河南省', '410100': '郑州市', '410101': '市辖区', '410102': '中原区', '410103': '二七区', '410104': '管城回族区', '410105': '金水区', '410106': '上街区', '410108': '惠济区', '410122': '中牟县', '410181': '巩义市', '410182': '荥阳市', '410183': '新密市', '410184': '新郑市', '410185': '登封市', '410200': '开封市', '410201': '市辖区', '410202': '龙亭区', '410203': '顺河回族区', '410204': '鼓楼区', '410205': '禹王台区', '410211': '金明区', '410212': '祥符区', '410221': '杞县', '410222': '通许县', '410223': '尉氏县', '410225': '兰考县', '410300': '洛阳市', '410301': '市辖区', '410302': '老城区', '410303': '西工区', '410304': '瀍河回族区', '410305': '涧西区', '410306': '吉利区', '410311': '洛龙区', '410322': '孟津县', '410323': '新安县', '410324': '栾川县', '410325': '嵩县', '410326': '汝阳县', '410327': '宜阳县', '410328': '洛宁县', '410329': '伊川县', '410381': '偃师市', '410400': '平顶山市', '410401': '市辖区', '410402': '新华区', '410403': '卫东区', '410404': '石龙区', '410411': '湛河区', '410421': '宝丰县', '410422': '叶县', '410423': '鲁山县', '410425': '郏县', '410481': '舞钢市', '410482': '汝州市', '410500': '安阳市', '410501': '市辖区', '410502': '文峰区', '410503': '北关区', '410505': '殷都区', '410506': '龙安区', '410522': '安阳县', '410523': '汤阴县', '410526': '滑县', '410527': '内黄县', '410581': '林州市', '410600': '鹤壁市', '410601': '市辖区', '410602': '鹤山区', '410603': '山城区', '410611': '淇滨区', '410621': '浚县', '410622': '淇县', '410700': '新乡市', '410701': '市辖区', '410702': '红旗区', '410703': '卫滨区', '410704': '凤泉区', '410711': '牧野区', '410721': '新乡县', '410724': '获嘉县', '410725': '原阳县', '410726': '延津县', '410727': '封丘县', '410728': '长垣县', '410781': '卫辉市', '410782': '辉县市', '410800': '焦作市', '410801': '市辖区', '410802': '解放区', '410803': '中站区', '410804': '马村区', '410811': '山阳区', '410821': '修武县', '410822': '博爱县', '410823': '武陟县', '410825': '温县', '410882': '沁阳市', '410883': '孟州市', '410900': '濮阳市', '410901': '市辖区', '410902': '华龙区', '410922': '清丰县', '410923': '南乐县', '410926': '范县', '410927': '台前县', '410928': '濮阳县', '411000': '许昌市', '411001': '市辖区', '411002': '魏都区', '411023': '许昌县', '411024': '鄢陵县', '411025': '襄城县', '411081': '禹州市', '411082': '长葛市', '411100': '漯河市', '411101': '市辖区', '411102': '源汇区', '411103': '郾城区', '411104': '召陵区', '411121': '舞阳县', '411122': '临颍县', '411200': '三门峡市', '411201': '市辖区', '411202': '湖滨区', '411221': '渑池县', '411222': '陕县', '411224': '卢氏县', '411281': '义马市', '411282': '灵宝市', '411300': '南阳市', '411301': '市辖区', '411302': '宛城区', '411303': '卧龙区', '411321': '南召县', '411322': '方城县', '411323': '西峡县', '411324': '镇平县', '411325': '内乡县', '411326': '淅川县', '411327': '社旗县', '411328': '唐河县', '411329': '新野县', '411330': '桐柏县', '411381': '邓州市', '411400': '商丘市', '411401': '市辖区', '411402': '梁园区', '411403': '睢阳区', '411421': '民权县', '411422': '睢县', '411423': '宁陵县', '411424': '柘城县', '411425': '虞城县', '411426': '夏邑县', '411481': '永城市', '411500': '信阳市', '411501': '市辖区', '411502': '浉河区', '411503': '平桥区', '411521': '罗山县', '411522': '光山县', '411523': '新县', '411524': '商城县', '411525': '固始县', '411526': '潢川县', '411527': '淮滨县', '411528': '息县', '411600': '周口市', '411601': '市辖区', '411602': '川汇区', '411621': '扶沟县', '411622': '西华县', '411623': '商水县', '411624': '沈丘县', '411625': '郸城县', '411626': '淮阳县', '411627': '太康县', '411628': '鹿邑县', '411681': '项城市', '411700': '驻马店市', '411701': '市辖区', '411702': '驿城区', '411721': '西平县', '411722': '上蔡县', '411723': '平舆县', '411724': '正阳县', '411725': '确山县', '411726': '泌阳县', '411727': '汝南县', '411728': '遂平县', '411729': '新蔡县', '419000': '省直辖县级行政区划', '419001': '济源市', '420000': '湖北省', '420100': '武汉市', '420101': '市辖区', '420102': '江岸区', '420103': '江汉区', '420104': '硚口区', '420105': '汉阳区', '420106': '武昌区', '420107': '青山区', '420111': '洪山区', '420112': 
'东西湖区', '420113': '汉南区', '420114': '蔡甸区', '420115': '江夏区', '420116': '黄陂区', '420117': '新洲区', '420200': '黄石市', '420201': '市辖区', '420202': '黄石港区', '420203': '西塞山区', '420204': '下陆区', '420205': '铁山区', '420222': '阳新县', '420281': '大冶市', '420300': '十堰市', '420301': '市辖区', '420302': '茅箭区', '420303': '张湾区', '420304': '郧阳区', '420322': '郧西县', '420323': '竹山县', '420324': '竹溪县', '420325': '房县', '420381': '丹江口市', '420500': '宜昌市', '420501': '市辖区', '420502': '西陵区', '420503': '伍家岗区', '420504': '点军区', '420505': '猇亭区', '420506': '夷陵区', '420525': '远安县', '420526': '兴山县', '420527': '秭归县', '420528': '长阳土家族自治县', '420529': '五峰土家族自治县', '420581': '宜都市', '420582': '当阳市', '420583': '枝江市', '420600': '襄阳市', '420601': '市辖区', '420602': '襄城区', '420606': '樊城区', '420607': '襄州区', '420624': '南漳县', '420625': '谷城县', '420626': '保康县', '420682': '老河口市', '420683': '枣阳市', '420684': '宜城市', '420700': '鄂州市', '420701': '市辖区', '420702': '梁子湖区', '420703': '华容区', '420704': '鄂城区', '420800': '荆门市', '420801': '市辖区', '420802': '东宝区', '420804': '掇刀区', '420821': '京山县', '420822': '沙洋县', '420881': '钟祥市', '420900': '孝感市', '420901': '市辖区', '420902': '孝南区', '420921': '孝昌县', '420922': '大悟县', '420923': '云梦县', '420981': '应城市', '420982': '安陆市', '420984': '汉川市', '421000': '荆州市', '421001': '市辖区', '421002': '沙市区', '421003': '荆州区', '421022': '公安县', '421023': '监利县', '421024': '江陵县', '421081': '石首市', '421083': '洪湖市', '421087': '松滋市', '421100': '黄冈市', '421101': '市辖区', '421102': '黄州区', '421121': '团风县', '421122': '红安县', '421123': '罗田县', '421124': '英山县', '421125': '浠水县', '421126': '蕲春县', '421127': '黄梅县', '421181': '麻城市', '421182': '武穴市', '421200': '咸宁市', '421201': '市辖区', '421202': '咸安区', '421221': '嘉鱼县', '421222': '通城县', '421223': '崇阳县', '421224': '通山县', '421281': '赤壁市', '421300': '随州市', '421301': '市辖区', '421303': '曾都区', '421321': '随县', '421381': '广水市', '422800': '恩施土家族苗族自治州', '422801': '恩施市', '422802': '利川市', '422822': '建始县', '422823': '巴东县', '422825': '宣恩县', '422826': '咸丰县', '422827': '来凤县', '422828': '鹤峰县', '429000': '省直辖县级行政区划', '429004': '仙桃市', '429005': '潜江市', '429006': '天门市', '429021': '神农架林区', '430000': '湖南省', '430100': '长沙市', '430101': '市辖区', '430102': '芙蓉区', '430103': '天心区', '430104': '岳麓区', '430105': '开福区', '430111': '雨花区', '430112': '望城区', '430121': '长沙县', '430124': '宁乡县', '430181': '浏阳市', '430200': '株洲市', '430201': '市辖区', '430202': '荷塘区', '430203': '芦淞区', '430204': '石峰区', '430211': '天元区', '430221': '株洲县', '430223': '攸县', '430224': '茶陵县', '430225': '炎陵县', '430281': '醴陵市', '430300': '湘潭市', '430301': '市辖区', '430302': '雨湖区', '430304': '岳塘区', '430321': '湘潭县', '430381': '湘乡市', '430382': '韶山市', '430400': '衡阳市', '430401': '市辖区', '430405': '珠晖区', '430406': '雁峰区', '430407': '石鼓区', '430408': '蒸湘区', '430412': '南岳区', '430421': '衡阳县', '430422': '衡南县', '430423': '衡山县', '430424': '衡东县', '430426': '祁东县', '430481': '耒阳市', '430482': '常宁市', '430500': '邵阳市', '430501': '市辖区', '430502': '双清区', '430503': '大祥区', '430511': '北塔区', '430521': '邵东县', '430522': '新邵县', '430523': '邵阳县', '430524': '隆回县', '430525': '洞口县', '430527': '绥宁县', '430528': '新宁县', '430529': '城步苗族自治县', '430581': '武冈市', '430600': '岳阳市', '430601': '市辖区', '430602': '岳阳楼区', '430603': '云溪区', '430611': '君山区', '430621': '岳阳县', '430623': '华容县', '430624': '湘阴县', '430626': '平江县', '430681': '汨罗市', '430682': '临湘市', '430700': '常德市', '430701': '市辖区', '430702': '武陵区', '430703': '鼎城区', '430721': '安乡县', '430722': '汉寿县', '430723': '澧县', '430724': '临澧县', '430725': '桃源县', '430726': '石门县', '430781': '津市市', '430800': '张家界市', '430801': '市辖区', '430802': '永定区', '430811': '武陵源区', '430821': '慈利县', '430822': '桑植县', '430900': '益阳市', '430901': 
'市辖区', '430902': '资阳区', '430903': '赫山区', '430921': '南县', '430922': '桃江县', '430923': '安化县', '430981': '沅江市', '431000': '郴州市', '431001': '市辖区', '431002': '北湖区', '431003': '苏仙区', '431021': '桂阳县', '431022': '宜章县', '431023': '永兴县', '431024': '嘉禾县', '431025': '临武县', '431026': '汝城县', '431027': '桂东县', '431028': '安仁县', '431081': '资兴市', '431100': '永州市', '431101': '市辖区', '431102': '零陵区', '431103': '冷水滩区', '431121': '祁阳县', '431122': '东安县', '431123': '双牌县', '431124': '道县', '431125': '江永县', '431126': '宁远县', '431127': '蓝山县', '431128': '新田县', '431129': '江华瑶族自治县', '431200': '怀化市', '431201': '市辖区', '431202': '鹤城区', '431221': '中方县', '431222': '沅陵县', '431223': '辰溪县', '431224': '溆浦县', '431225': '会同县', '431226': '麻阳苗族自治县', '431227': '新晃侗族自治县', '431228': '芷江侗族自治县', '431229': '靖州苗族侗族自治县', '431230': '通道侗族自治县', '431281': '洪江市', '431300': '娄底市', '431301': '市辖区', '431302': '娄星区', '431321': '双峰县', '431322': '新化县', '431381': '冷水江市', '431382': '涟源市', '433100': '湘西土家族苗族自治州', '433101': '吉首市', '433122': '泸溪县', '433123': '凤凰县', '433124': '花垣县', '433125': '保靖县', '433126': '古丈县', '433127': '永顺县', '433130': '龙山县', '440000': '广东省', '440100': '广州市', '440101': '市辖区', '440103': '荔湾区', '440104': '越秀区', '440105': '海珠区', '440106': '天河区', '440111': '白云区', '440112': '黄埔区', '440113': '番禺区', '440114': '花都区', '440115': '南沙区', '440117': '从化区', '440118': '增城区', '440200': '韶关市', '440201': '市辖区', '440203': '武江区', '440204': '浈江区', '440205': '曲江区', '440222': '始兴县', '440224': '仁化县', '440229': '翁源县', '440232': '乳源瑶族自治县', '440233': '新丰县', '440281': '乐昌市', '440282': '南雄市', '440300': '深圳市', '440301': '市辖区', '440303': '罗湖区', '440304': '福田区', '440305': '南山区', '440306': '宝安区', '440307': '龙岗区', '440308': '盐田区', '440400': '珠海市', '440401': '市辖区', '440402': '香洲区', '440403': '斗门区', '440404': '金湾区', '440500': '汕头市', '440501': '市辖区', '440507': '龙湖区', '440511': '金平区', '440512': '濠江区', '440513': '潮阳区', '440514': '潮南区', '440515': '澄海区', '440523': '南澳县', '440600': '佛山市', '440601': '市辖区', '440604': '禅城区', '440605': '南海区', '440606': '顺德区', '440607': '三水区', '440608': '高明区', '440700': '江门市', '440701': '市辖区', '440703': '蓬江区', '440704': '江海区', '440705': '新会区', '440781': '台山市', '440783': '开平市', '440784': '鹤山市', '440785': '恩平市', '440800': '湛江市', '440801': '市辖区', '440802': '赤坎区', '440803': '霞山区', '440804': '坡头区', '440811': '麻章区', '440823': '遂溪县', '440825': '徐闻县', '440881': '廉江市', '440882': '雷州市', '440883': '吴川市', '440900': '茂名市', '440901': '市辖区', '440902': '茂南区', '440904': '电白区', '440981': '高州市', '440982': '化州市', '440983': '信宜市', '441200': '肇庆市', '441201': '市辖区', '441202': '端州区', '441203': '鼎湖区', '441204': '高要区', '441223': '广宁县', '441224': '怀集县', '441225': '封开县', '441226': '德庆县', '441284': '四会市', '441300': '惠州市', '441301': '市辖区', '441302': '惠城区', '441303': '惠阳区', '441322': '博罗县', '441323': '惠东县', '441324': '龙门县', '441400': '梅州市', '441401': '市辖区', '441402': '梅江区', '441403': '梅县区', '441422': '大埔县', '441423': '丰顺县', '441424': '五华县', '441426': '平远县', '441427': '蕉岭县', '441481': '兴宁市', '441500': '汕尾市', '441501': '市辖区', '441502': '城区', '441521': '海丰县', '441523': '陆河县', '441581': '陆丰市', '441600': '河源市', '441601': '市辖区', '441602': '源城区', '441621': '紫金县', '441622': '龙川县', '441623': '连平县', '441624': '和平县', '441625': '东源县', '441700': '阳江市', '441701': '市辖区', '441702': '江城区', '441704': '阳东区', '441721': '阳西县', '441781': '阳春市', '441800': '清远市', '441801': '市辖区', '441802': '清城区', '441803': '清新区', '441821': '佛冈县', '441823': '阳山县', '441825': '连山壮族瑶族自治县', '441826': '连南瑶族自治县', '441881': '英德市', '441882': '连州市', '441900': '东莞市', '442000': '中山市', '445100': '潮州市', '445101': '市辖区', '445102': '湘桥区', 
'445103': '潮安区', '445122': '饶平县', '445200': '揭阳市', '445201': '市辖区', '445202': '榕城区', '445203': '揭东区', '445222': '揭西县', '445224': '惠来县', '445281': '普宁市', '445300': '云浮市', '445301': '市辖区', '445302': '云城区', '445303': '云安区', '445321': '新兴县', '445322': '郁南县', '445381': '罗定市', '450000': '广西壮族自治区', '450100': '南宁市', '450101': '市辖区', '450102': '兴宁区', '450103': '青秀区', '450105': '江南区', '450107': '西乡塘区', '450108': '良庆区', '450109': '邕宁区', '450110': '武鸣区', '450123': '隆安县', '450124': '马山县', '450125': '上林县', '450126': '宾阳县', '450127': '横县', '450200': '柳州市', '450201': '市辖区', '450202': '城中区', '450203': '鱼峰区', '450204': '柳南区', '450205': '柳北区', '450221': '柳江县', '450222': '柳城县', '450223': '鹿寨县', '450224': '融安县', '450225': '融水苗族自治县', '450226': '三江侗族自治县', '450300': '桂林市', '450301': '市辖区', '450302': '秀峰区', '450303': '叠彩区', '450304': '象山区', '450305': '七星区', '450311': '雁山区', '450312': '临桂区', '450321': '阳朔县', '450323': '灵川县', '450324': '全州县', '450325': '兴安县', '450326': '永福县', '450327': '灌阳县', '450328': '龙胜各族自治县', '450329': '资源县', '450330': '平乐县', '450331': '荔浦县', '450332': '恭城瑶族自治县', '450400': '梧州市', '450401': '市辖区', '450403': '万秀区', '450405': '长洲区', '450406': '龙圩区', '450421': '苍梧县', '450422': '藤县', '450423': '蒙山县', '450481': '岑溪市', '450500': '北海市', '450501': '市辖区', '450502': '海城区', '450503': '银海区', '450512': '铁山港区', '450521': '合浦县', '450600': '防城港市', '450601': '市辖区', '450602': '港口区', '450603': '防城区', '450621': '上思县', '450681': '东兴市', '450700': '钦州市', '450701': '市辖区', '450702': '钦南区', '450703': '钦北区', '450721': '灵山县', '450722': '浦北县', '450800': '贵港市', '450801': '市辖区', '450802': '港北区', '450803': '港南区', '450804': '覃塘区', '450821': '平南县', '450881': '桂平市', '450900': '玉林市', '450901': '市辖区', '450902': '玉州区', '450903': '福绵区', '450921': '容县', '450922': '陆川县', '450923': '博白县', '450924': '兴业县', '450981': '北流市', '451000': '百色市', '451001': '市辖区', '451002': '右江区', '451021': '田阳县', '451022': '田东县', '451023': '平果县', '451024': '德保县', '451026': '那坡县', '451027': '凌云县', '451028': '乐业县', '451029': '田林县', '451030': '西林县', '451031': '隆林各族自治县', '451081': '靖西市', '451100': '贺州市', '451101': '市辖区', '451102': '八步区', '451121': '昭平县', '451122': '钟山县', '451123': '富川瑶族自治县', '451200': '河池市', '451201': '市辖区', '451202': '金城江区', '451221': '南丹县', '451222': '天峨县', '451223': '凤山县', '451224': '东兰县', '451225': '罗城仫佬族自治县', '451226': '环江毛南族自治县', '451227': '巴马瑶族自治县', '451228': '都安瑶族自治县', '451229': '大化瑶族自治县', '451281': '宜州市', '451300': '来宾市', '451301': '市辖区', '451302': '兴宾区', '451321': '忻城县', '451322': '象州县', '451323': '武宣县', '451324': '金秀瑶族自治县', '451381': '合山市', '451400': '崇左市', '451401': '市辖区', '451402': '江州区', '451421': '扶绥县', '451422': '宁明县', '451423': '龙州县', '451424': '大新县', '451425': '天等县', '451481': '凭祥市', '460000': '海南省', '460100': '海口市', '460101': '市辖区', '460105': '秀英区', '460106': '龙华区', '460107': '琼山区', '460108': '美兰区', '460200': '三亚市', '460201': '市辖区', '460202': '海棠区', '460203': '吉阳区', '460204': '天涯区', '460205': '崖州区', '460300': '三沙市', '460321': '西沙群岛', '460322': '南沙群岛', '460323': '中沙群岛的岛礁及其海域', '469000': '省直辖县级行政区划', '469001': '五指山市', '469002': '琼海市', '469003': '儋州市', '469005': '文昌市', '469006': '万宁市', '469007': '东方市', '469021': '定安县', '469022': '屯昌县', '469023': '澄迈县', '469024': '临高县', '469025': '白沙黎族自治县', '469026': '昌江黎族自治县', '469027': '乐东黎族自治县', '469028': '陵水黎族自治县', '469029': '保亭黎族苗族自治县', '469030': '琼中黎族苗族自治县', '500000': '重庆市', '500100': '市辖区', '500101': '万州区', '500102': '涪陵区', '500103': '渝中区', '500104': '大渡口区', '500105': '江北区', '500106': '沙坪坝区', '500107': '九龙坡区', '500108': '南岸区', '500109': '北碚区', '500110': '綦江区', '500111': '大足区', '500112': '渝北区', 
'500113': '巴南区', '500114': '黔江区', '500115': '长寿区', '500116': '江津区', '500117': '合川区', '500118': '永川区', '500119': '南川区', '500120': '璧山区', '500151': '铜梁区', '500152': '潼南区', '500153': '荣昌区', '500200': '县', '500228': '梁平县', '500229': '城口县', '500230': '丰都县', '500231': '垫江县', '500232': '武隆县', '500233': '忠县', '500234': '开县', '500235': '云阳县', '500236': '奉节县', '500237': '巫山县', '500238': '巫溪县', '500240': '石柱土家族自治县', '500241': '秀山土家族苗族自治县', '500242': '酉阳土家族苗族自治县', '500243': '彭水苗族土家族自治县', '510000': '四川省', '510100': '成都市', '510101': '市辖区', '510104': '锦江区', '510105': '青羊区', '510106': '金牛区', '510107': '武侯区', '510108': '成华区', '510112': '龙泉驿区', '510113': '青白江区', '510114': '新都区', '510115': '温江区', '510121': '金堂县', '510122': '双流县', '510124': '郫县', '510129': '大邑县', '510131': '蒲江县', '510132': '新津县', '510181': '都江堰市', '510182': '彭州市', '510183': '邛崃市', '510184': '崇州市', '510300': '自贡市', '510301': '市辖区', '510302': '自流井区', '510303': '贡井区', '510304': '大安区', '510311': '沿滩区', '510321': '荣县', '510322': '富顺县', '510400': '攀枝花市', '510401': '市辖区', '510402': '东区', '510403': '西区', '510411': '仁和区', '510421': '米易县', '510422': '盐边县', '510500': '泸州市', '510501': '市辖区', '510502': '江阳区', '510503': '纳溪区', '510504': '龙马潭区', '510521': '泸县', '510522': '合江县', '510524': '叙永县', '510525': '古蔺县', '510600': '德阳市', '510601': '市辖区', '510603': '旌阳区', '510623': '中江县', '510626': '罗江县', '510681': '广汉市', '510682': '什邡市', '510683': '绵竹市', '510700': '绵阳市', '510701': '市辖区', '510703': '涪城区', '510704': '游仙区', '510722': '三台县', '510723': '盐亭县', '510724': '安县', '510725': '梓潼县', '510726': '北川羌族自治县', '510727': '平武县', '510781': '江油市', '510800': '广元市', '510801': '市辖区', '510802': '利州区', '510811': '昭化区', '510812': '朝天区', '510821': '旺苍县', '510822': '青川县', '510823': '剑阁县', '510824': '苍溪县', '510900': '遂宁市', '510901': '市辖区', '510903': '船山区', '510904': '安居区', '510921': '蓬溪县', '510922': '射洪县', '510923': '大英县', '511000': '内江市', '511001': '市辖区', '511002': '市中区', '511011': '东兴区', '511024': '威远县', '511025': '资中县', '511028': '隆昌县', '511100': '乐山市', '511101': '市辖区', '511102': '市中区', '511111': '沙湾区', '511112': '五通桥区', '511113': '金口河区', '511123': '犍为县', '511124': '井研县', '511126': '夹江县', '511129': '沐川县', '511132': '峨边彝族自治县', '511133': '马边彝族自治县', '511181': '峨眉山市', '511300': '南充市', '511301': '市辖区', '511302': '顺庆区', '511303': '高坪区', '511304': '嘉陵区', '511321': '南部县', '511322': '营山县', '511323': '蓬安县', '511324': '仪陇县', '511325': '西充县', '511381': '阆中市', '511400': '眉山市', '511401': '市辖区', '511402': '东坡区', '511403': '彭山区', '511421': '仁寿县', '511423': '洪雅县', '511424': '丹棱县', '511425': '青神县', '511500': '宜宾市', '511501': '市辖区', '511502': '翠屏区', '511503': '南溪区', '511521': '宜宾县', '511523': '江安县', '511524': '长宁县', '511525': '高县', '511526': '珙县', '511527': '筠连县', '511528': '兴文县', '511529': '屏山县', '511600': '广安市', '511601': '市辖区', '511602': '广安区', '511603': '前锋区', '511621': '岳池县', '511622': '武胜县', '511623': '邻水县', '511681': '华蓥市', '511700': '达州市', '511701': '市辖区', '511702': '通川区', '511703': '达川区', '511722': '宣汉县', '511723': '开江县', '511724': '大竹县', '511725': '渠县', '511781': '万源市', '511800': '雅安市', '511801': '市辖区', '511802': '雨城区', '511803': '名山区', '511822': '荥经县', '511823': '汉源县', '511824': '石棉县', '511825': '天全县', '511826': '芦山县', '511827': '宝兴县', '511900': '巴中市', '511901': '市辖区', '511902': '巴州区', '511903': '恩阳区', '511921': '通江县', '511922': '南江县', '511923': '平昌县', '512000': '资阳市', '512001': '市辖区', '512002': '雁江区', '512021': '安岳县', '512022': '乐至县', '512081': '简阳市', '513200': '阿坝藏族羌族自治州', '513221': '汶川县', '513222': '理县', '513223': '茂县', '513224': '松潘县', '513225': '九寨沟县', '513226': '金川县', '513227': 
'小金县', '513228': '黑水县', '513229': '马尔康县', '513230': '壤塘县', '513231': '阿坝县', '513232': '若尔盖县', '513233': '红原县', '513300': '甘孜藏族自治州', '513301': '康定市', '513322': '泸定县', '513323': '丹巴县', '513324': '九龙县', '513325': '雅江县', '513326': '道孚县', '513327': '炉霍县', '513328': '甘孜县', '513329': '新龙县', '513330': '德格县', '513331': '白玉县', '513332': '石渠县', '513333': '色达县', '513334': '理塘县', '513335': '巴塘县', '513336': '乡城县', '513337': '稻城县', '513338': '得荣县', '513400': '凉山彝族自治州', '513401': '西昌市', '513422': '木里藏族自治县', '513423': '盐源县', '513424': '德昌县', '513425': '会理县', '513426': '会东县', '513427': '宁南县', '513428': '普格县', '513429': '布拖县', '513430': '金阳县', '513431': '昭觉县', '513432': '喜德县', '513433': '冕宁县', '513434': '越西县', '513435': '甘洛县', '513436': '美姑县', '513437': '雷波县', '520000': '贵州省', '520100': '贵阳市', '520101': '市辖区', '520102': '南明区', '520103': '云岩区', '520111': '花溪区', '520112': '乌当区', '520113': '白云区', '520115': '观山湖区', '520121': '开阳县', '520122': '息烽县', '520123': '修文县', '520181': '清镇市', '520200': '六盘水市', '520201': '钟山区', '520203': '六枝特区', '520221': '水城县', '520222': '盘县', '520300': '遵义市', '520301': '市辖区', '520302': '红花岗区', '520303': '汇川区', '520321': '遵义县', '520322': '桐梓县', '520323': '绥阳县', '520324': '正安县', '520325': '道真仡佬族苗族自治县', '520326': '务川仡佬族苗族自治县', '520327': '凤冈县', '520328': '湄潭县', '520329': '余庆县', '520330': '习水县', '520381': '赤水市', '520382': '仁怀市', '520400': '安顺市', '520401': '市辖区', '520402': '西秀区', '520403': '平坝区', '520422': '普定县', '520423': '镇宁布依族苗族自治县', '520424': '关岭布依族苗族自治县', '520425': '紫云苗族布依族自治县', '520500': '毕节市', '520501': '市辖区', '520502': '七星关区', '520521': '大方县', '520522': '黔西县', '520523': '金沙县', '520524': '织金县', '520525': '纳雍县', '520526': '威宁彝族回族苗族自治县', '520527': '赫章县', '520600': '铜仁市', '520601': '市辖区', '520602': '碧江区', '520603': '万山区', '520621': '江口县', '520622': '玉屏侗族自治县', '520623': '石阡县', '520624': '思南县', '520625': '印江土家族苗族自治县', '520626': '德江县', '520627': '沿河土家族自治县', '520628': '松桃苗族自治县', '522300': '黔西南布依族苗族自治州', '522301': '兴义市', '522322': '兴仁县', '522323': '普安县', '522324': '晴隆县', '522325': '贞丰县', '522326': '望谟县', '522327': '册亨县', '522328': '安龙县', '522600': '黔东南苗族侗族自治州', '522601': '凯里市', '522622': '黄平县', '522623': '施秉县', '522624': '三穗县', '522625': '镇远县', '522626': '岑巩县', '522627': '天柱县', '522628': '锦屏县', '522629': '剑河县', '522630': '台江县', '522631': '黎平县', '522632': '榕江县', '522633': '从江县', '522634': '雷山县', '522635': '麻江县', '522636': '丹寨县', '522700': '黔南布依族苗族自治州', '522701': '都匀市', '522702': '福泉市', '522722': '荔波县', '522723': '贵定县', '522725': '瓮安县', '522726': '独山县', '522727': '平塘县', '522728': '罗甸县', '522729': '长顺县', '522730': '龙里县', '522731': '惠水县', '522732': '三都水族自治县', '530000': '云南省', '530100': '昆明市', '530101': '市辖区', '530102': '五华区', '530103': '盘龙区', '530111': '官渡区', '530112': '西山区', '530113': '东川区', '530114': '呈贡区', '530122': '晋宁县', '530124': '富民县', '530125': '宜良县', '530126': '石林彝族自治县', '530127': '嵩明县', '530128': '禄劝彝族苗族自治县', '530129': '寻甸回族彝族自治县', '530181': '安宁市', '530300': '曲靖市', '530301': '市辖区', '530302': '麒麟区', '530321': '马龙县', '530322': '陆良县', '530323': '师宗县', '530324': '罗平县', '530325': '富源县', '530326': '会泽县', '530328': '沾益县', '530381': '宣威市', '530400': '玉溪市', '530401': '市辖区', '530402': '红塔区', '530421': '江川县', '530422': '澄江县', '530423': '通海县', '530424': '华宁县', '530425': '易门县', '530426': '峨山彝族自治县', '530427': '新平彝族傣族自治县', '530428': '元江哈尼族彝族傣族自治县', '530500': '保山市', '530501': '市辖区', '530502': '隆阳区', '530521': '施甸县', '530523': '龙陵县', '530524': '昌宁县', '530581': '腾冲市', '530600': '昭通市', '530601': '市辖区', '530602': '昭阳区', '530621': '鲁甸县', '530622': '巧家县', '530623': '盐津县', '530624': '大关县', '530625': '永善县', 
'530626': '绥江县', '530627': '镇雄县', '530628': '彝良县', '530629': '威信县', '530630': '水富县', '530700': '丽江市', '530701': '市辖区', '530702': '古城区', '530721': '玉龙纳西族自治县', '530722': '永胜县', '530723': '华坪县', '530724': '宁蒗彝族自治县', '530800': '普洱市', '530801': '市辖区', '530802': '思茅区', '530821': '宁洱哈尼族彝族自治县', '530822': '墨江哈尼族自治县', '530823': '景东彝族自治县', '530824': '景谷傣族彝族自治县', '530825': '镇沅彝族哈尼族拉祜族自治县', '530826': '江城哈尼族彝族自治县', '530827': '孟连傣族拉祜族佤族自治县', '530828': '澜沧拉祜族自治县', '530829': '西盟佤族自治县', '530900': '临沧市', '530901': '市辖区', '530902': '临翔区', '530921': '凤庆县', '530922': '云县', '530923': '永德县', '530924': '镇康县', '530925': '双江拉祜族佤族布朗族傣族自治县', '530926': '耿马傣族佤族自治县', '530927': '沧源佤族自治县', '532300': '楚雄彝族自治州', '532301': '楚雄市', '532322': '双柏县', '532323': '牟定县', '532324': '南华县', '532325': '姚安县', '532326': '大姚县', '532327': '永仁县', '532328': '元谋县', '532329': '武定县', '532331': '禄丰县', '532500': '红河哈尼族彝族自治州', '532501': '个旧市', '532502': '开远市', '532503': '蒙自市', '532504': '弥勒市', '532523': '屏边苗族自治县', '532524': '建水县', '532525': '石屏县', '532527': '泸西县', '532528': '元阳县', '532529': '红河县', '532530': '金平苗族瑶族傣族自治县', '532531': '绿春县', '532532': '河口瑶族自治县', '532600': '文山壮族苗族自治州', '532601': '文山市', '532622': '砚山县', '532623': '西畴县', '532624': '麻栗坡县', '532625': '马关县', '532626': '丘北县', '532627': '广南县', '532628': '富宁县', '532800': '西双版纳傣族自治州', '532801': '景洪市', '532822': '勐海县', '532823': '勐腊县', '532900': '大理白族自治州', '532901': '大理市', '532922': '漾濞彝族自治县', '532923': '祥云县', '532924': '宾川县', '532925': '弥渡县', '532926': '南涧彝族自治县', '532927': '巍山彝族回族自治县', '532928': '永平县', '532929': '云龙县', '532930': '洱源县', '532931': '剑川县', '532932': '鹤庆县', '533100': '德宏傣族景颇族自治州', '533102': '瑞丽市', '533103': '芒市', '533122': '梁河县', '533123': '盈江县', '533124': '陇川县', '533300': '怒江傈僳族自治州', '533321': '泸水县', '533323': '福贡县', '533324': '贡山独龙族怒族自治县', '533325': '兰坪白族普米族自治县', '533400': '迪庆藏族自治州', '533401': '香格里拉市', '533422': '德钦县', '533423': '维西傈僳族自治县', '540000': '西藏自治区', '540100': '拉萨市', '540101': '市辖区', '540102': '城关区', '540121': '林周县', '540122': '当雄县', '540123': '尼木县', '540124': '曲水县', '540125': '堆龙德庆县', '540126': '达孜县', '540127': '墨竹工卡县', '540200': '日喀则市', '540202': '桑珠孜区', '540221': '南木林县', '540222': '江孜县', '540223': '定日县', '540224': '萨迦县', '540225': '拉孜县', '540226': '昂仁县', '540227': '谢通门县', '540228': '白朗县', '540229': '仁布县', '540230': '康马县', '540231': '定结县', '540232': '仲巴县', '540233': '亚东县', '540234': '吉隆县', '540235': '聂拉木县', '540236': '萨嘎县', '540237': '岗巴县', '540300': '昌都市', '540302': '卡若区', '540321': '江达县', '540322': '贡觉县', '540323': '类乌齐县', '540324': '丁青县', '540325': '察雅县', '540326': '八宿县', '540327': '左贡县', '540328': '芒康县', '540329': '洛隆县', '540330': '边坝县', '540400': '林芝市', '540402': '巴宜区', '540421': '工布江达县', '540422': '米林县', '540423': '墨脱县', '540424': '波密县', '540425': '察隅县', '540426': '朗县', '542200': '山南地区', '542221': '乃东县', '542222': '扎囊县', '542223': '贡嘎县', '542224': '桑日县', '542225': '琼结县', '542226': '曲松县', '542227': '措美县', '542228': '洛扎县', '542229': '加查县', '542231': '隆子县', '542232': '错那县', '542233': '浪卡子县', '542400': '那曲地区', '542421': '那曲县', '542422': '嘉黎县', '542423': '比如县', '542424': '聂荣县', '542425': '安多县', '542426': '申扎县', '542427': '索县', '542428': '班戈县', '542429': '巴青县', '542430': '尼玛县', '542431': '双湖县', '542500': '阿里地区', '542521': '普兰县', '542522': '札达县', '542523': '噶尔县', '542524': '日土县', '542525': '革吉县', '542526': '改则县', '542527': '措勤县', '610000': '陕西省', '610100': '西安市', '610101': '市辖区', '610102': '新城区', '610103': '碑林区', '610104': '莲湖区', '610111': '灞桥区', '610112': '未央区', '610113': '雁塔区', '610114': '阎良区', '610115': '临潼区', '610116': '长安区', '610117': '高陵区', '610122': '蓝田县', 
'610124': '周至县', '610125': '户县', '610200': '铜川市', '610201': '市辖区', '610202': '王益区', '610203': '印台区', '610204': '耀州区', '610222': '宜君县', '610300': '宝鸡市', '610301': '市辖区', '610302': '渭滨区', '610303': '金台区', '610304': '陈仓区', '610322': '凤翔县', '610323': '岐山县', '610324': '扶风县', '610326': '眉县', '610327': '陇县', '610328': '千阳县', '610329': '麟游县', '610330': '凤县', '610331': '太白县', '610400': '咸阳市', '610401': '市辖区', '610402': '秦都区', '610403': '杨陵区', '610404': '渭城区', '610422': '三原县', '610423': '泾阳县', '610424': '乾县', '610425': '礼泉县', '610426': '永寿县', '610427': '彬县', '610428': '长武县', '610429': '旬邑县', '610430': '淳化县', '610431': '武功县', '610481': '兴平市', '610500': '渭南市', '610501': '市辖区', '610502': '临渭区', '610521': '华县', '610522': '潼关县', '610523': '大荔县', '610524': '合阳县', '610525': '澄城县', '610526': '蒲城县', '610527': '白水县', '610528': '富平县', '610581': '韩城市', '610582': '华阴市', '610600': '延安市', '610601': '市辖区', '610602': '宝塔区', '610621': '延长县', '610622': '延川县', '610623': '子长县', '610624': '安塞县', '610625': '志丹县', '610626': '吴起县', '610627': '甘泉县', '610628': '富县', '610629': '洛川县', '610630': '宜川县', '610631': '黄龙县', '610632': '黄陵县', '610700': '汉中市', '610701': '市辖区', '610702': '汉台区', '610721': '南郑县', '610722': '城固县', '610723': '洋县', '610724': '西乡县', '610725': '勉县', '610726': '宁强县', '610727': '略阳县', '610728': '镇巴县', '610729': '留坝县', '610730': '佛坪县', '610800': '榆林市', '610801': '市辖区', '610802': '榆阳区', '610821': '神木县', '610822': '府谷县', '610823': '横山县', '610824': '靖边县', '610825': '定边县', '610826': '绥德县', '610827': '米脂县', '610828': '佳县', '610829': '吴堡县', '610830': '清涧县', '610831': '子洲县', '610900': '安康市', '610901': '市辖区', '610902': '汉滨区', '610921': '汉阴县', '610922': '石泉县', '610923': '宁陕县', '610924': '紫阳县', '610925': '岚皋县', '610926': '平利县', '610927': '镇坪县', '610928': '旬阳县', '610929': '白河县', '611000': '商洛市', '611001': '市辖区', '611002': '商州区', '611021': '洛南县', '611022': '丹凤县', '611023': '商南县', '611024': '山阳县', '611025': '镇安县', '611026': '柞水县', '620000': '甘肃省', '620100': '兰州市', '620101': '市辖区', '620102': '城关区', '620103': '七里河区', '620104': '西固区', '620105': '安宁区', '620111': '红古区', '620121': '永登县', '620122': '皋兰县', '620123': '榆中县', '620200': '嘉峪关市', '620201': '市辖区', '620300': '金昌市', '620301': '市辖区', '620302': '金川区', '620321': '永昌县', '620400': '白银市', '620401': '市辖区', '620402': '白银区', '620403': '平川区', '620421': '靖远县', '620422': '会宁县', '620423': '景泰县', '620500': '天水市', '620501': '市辖区', '620502': '秦州区', '620503': '麦积区', '620521': '清水县', '620522': '秦安县', '620523': '甘谷县', '620524': '武山县', '620525': '张家川回族自治县', '620600': '武威市', '620601': '市辖区', '620602': '凉州区', '620621': '民勤县', '620622': '古浪县', '620623': '天祝藏族自治县', '620700': '张掖市', '620701': '市辖区', '620702': '甘州区', '620721': '肃南裕固族自治县', '620722': '民乐县', '620723': '临泽县', '620724': '高台县', '620725': '山丹县', '620800': '平凉市', '620801': '市辖区', '620802': '崆峒区', '620821': '泾川县', '620822': '灵台县', '620823': '崇信县', '620824': '华亭县', '620825': '庄浪县', '620826': '静宁县', '620900': '酒泉市', '620901': '市辖区', '620902': '肃州区', '620921': '金塔县', '620922': '瓜州县', '620923': '肃北蒙古族自治县', '620924': '阿克塞哈萨克族自治县', '620981': '玉门市', '620982': '敦煌市', '621000': '庆阳市', '621001': '市辖区', '621002': '西峰区', '621021': '庆城县', '621022': '环县', '621023': '华池县', '621024': '合水县', '621025': '正宁县', '621026': '宁县', '621027': '镇原县', '621100': '定西市', '621101': '市辖区', '621102': '安定区', '621121': '通渭县', '621122': '陇西县', '621123': '渭源县', '621124': '临洮县', '621125': '漳县', '621126': '岷县', '621200': '陇南市', '621201': '市辖区', '621202': '武都区', '621221': '成县', '621222': '文县', '621223': '宕昌县', '621224': '康县', '621225': '西和县', '621226': '礼县', '621227': '徽县', '621228': 
'两当县', '622900': '临夏回族自治州', '622901': '临夏市', '622921': '临夏县', '622922': '康乐县', '622923': '永靖县', '622924': '广河县', '622925': '和政县', '622926': '东乡族自治县', '622927': '积石山保安族东乡族撒拉族自治县', '623000': '甘南藏族自治州', '623001': '合作市', '623021': '临潭县', '623022': '卓尼县', '623023': '舟曲县', '623024': '迭部县', '623025': '玛曲县', '623026': '碌曲县', '623027': '夏河县', '630000': '青海省', '630100': '西宁市', '630101': '市辖区', '630102': '城东区', '630103': '城中区', '630104': '城西区', '630105': '城北区', '630121': '大通回族土族自治县', '630122': '湟中县', '630123': '湟源县', '630200': '海东市', '630202': '乐都区', '630203': '平安区', '630222': '民和回族土族自治县', '630223': '互助土族自治县', '630224': '化隆回族自治县', '630225': '循化撒拉族自治县', '632200': '海北藏族自治州', '632221': '门源回族自治县', '632222': '祁连县', '632223': '海晏县', '632224': '刚察县', '632300': '黄南藏族自治州', '632321': '同仁县', '632322': '尖扎县', '632323': '泽库县', '632324': '河南蒙古族自治县', '632500': '海南藏族自治州', '632521': '共和县', '632522': '同德县', '632523': '贵德县', '632524': '兴海县', '632525': '贵南县', '632600': '果洛藏族自治州', '632621': '玛沁县', '632622': '班玛县', '632623': '甘德县', '632624': '达日县', '632625': '久治县', '632626': '玛多县', '632700': '玉树藏族自治州', '632701': '玉树市', '632722': '杂多县', '632723': '称多县', '632724': '治多县', '632725': '囊谦县', '632726': '曲麻莱县', '632800': '海西蒙古族藏族自治州', '632801': '格尔木市', '632802': '德令哈市', '632821': '乌兰县', '632822': '都兰县', '632823': '天峻县', '640000': '宁夏回族自治区', '640100': '银川市', '640101': '市辖区', '640104': '兴庆区', '640105': '西夏区', '640106': '金凤区', '640121': '永宁县', '640122': '贺兰县', '640181': '灵武市', '640200': '石嘴山市', '640201': '市辖区', '640202': '大武口区', '640205': '惠农区', '640221': '平罗县', '640300': '吴忠市', '640301': '市辖区', '640302': '利通区', '640303': '红寺堡区', '640323': '盐池县', '640324': '同心县', '640381': '青铜峡市', '640400': '固原市', '640401': '市辖区', '640402': '原州区', '640422': '西吉县', '640423': '隆德县', '640424': '泾源县', '640425': '彭阳县', '640500': '中卫市', '640501': '市辖区', '640502': '沙坡头区', '640521': '中宁县', '640522': '海原县', '650000': '新疆维吾尔自治区', '650100': '乌鲁木齐市', '650101': '市辖区', '650102': '天山区', '650103': '沙依巴克区', '650104': '新市区', '650105': '水磨沟区', '650106': '头屯河区', '650107': '达坂城区', '650109': '米东区', '650121': '乌鲁木齐县', '650200': '克拉玛依市', '650201': '市辖区', '650202': '独山子区', '650203': '克拉玛依区', '650204': '白碱滩区', '650205': '乌尔禾区', '650400': '吐鲁番市', '650402': '高昌区', '650421': '鄯善县', '650422': '托克逊县', '652200': '哈密地区', '652201': '哈密市', '652222': '巴里坤哈萨克自治县', '652223': '伊吾县', '652300': '昌吉回族自治州', '652301': '昌吉市', '652302': '阜康市', '652323': '呼图壁县', '652324': '玛纳斯县', '652325': '奇台县', '652327': '吉木萨尔县', '652328': '木垒哈萨克自治县', '652700': '博尔塔拉蒙古自治州', '652701': '博乐市', '652702': '阿拉山口市', '652722': '精河县', '652723': '温泉县', '652800': '巴音郭楞蒙古自治州', '652801': '库尔勒市', '652822': '轮台县', '652823': '尉犁县', '652824': '若羌县', '652825': '且末县', '652826': '焉耆回族自治县', '652827': '和静县', '652828': '和硕县', '652829': '博湖县', '652900': '阿克苏地区', '652901': '阿克苏市', '652922': '温宿县', '652923': '库车县', '652924': '沙雅县', '652925': '新和县', '652926': '拜城县', '652927': '乌什县', '652928': '阿瓦提县', '652929': '柯坪县', '653000': '克孜勒苏柯尔克孜自治州', '653001': '阿图什市', '653022': '阿克陶县', '653023': '阿合奇县', '653024': '乌恰县', '653100': '喀什地区', '653101': '喀什市', '653121': '疏附县', '653122': '疏勒县', '653123': '英吉沙县', '653124': '泽普县', '653125': '莎车县', '653126': '叶城县', '653127': '麦盖提县', '653128': '岳普湖县', '653129': '伽师县', '653130': '巴楚县', '653131': '塔什库尔干塔吉克自治县', '653200': '和田地区', '653201': '和田市', '653221': '和田县', '653222': '墨玉县', '653223': '皮山县', '653224': '洛浦县', '653225': '策勒县', '653226': '于田县', '653227': '民丰县', '654000': '伊犁哈萨克自治州', '654002': '伊宁市', '654003': '奎屯市', '654004': '霍尔果斯市', '654021': '伊宁县', '654022': '察布查尔锡伯自治县', '654023': '霍城县', '654024': '巩留县', 
'654025': '新源县', '654026': '昭苏县', '654027': '特克斯县', '654028': '尼勒克县', '654200': '塔城地区', '654201': '塔城市', '654202': '乌苏市', '654221': '额敏县', '654223': '沙湾县', '654224': '托里县', '654225': '裕民县', '654226': '和布克赛尔蒙古自治县', '654300': '阿勒泰地区', '654301': '阿勒泰市', '654321': '布尔津县', '654322': '富蕴县', '654323': '福海县', '654324': '哈巴河县', '654325': '青河县', '654326': '吉木乃县', '659000': '自治区直辖县级行政区划', '659001': '石河子市', '659002': '阿拉尔市', '659003': '图木舒克市', '659004': '五家渠市', '710000': '台湾省', '810000': '香港特别行政区', '820000': '澳门特别行政区', }
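# --- Illustrative sketch (editor's example, not part of the original data module) ---
# The six-digit codes in the mapping above are hierarchical: digits 1-2 identify the
# province-level region, digits 1-4 the prefecture-level city, and all six the
# county/district. A small lookup helper over such a code->name table could look like
# the sketch below. `_DEMO_CODES` is a tiny subset copied from the table above and is
# used only so the example runs on its own; with the full dict the same function works
# unchanged (pass the real variable, whatever it is named, as `table`).

_DEMO_CODES = {
    '450000': '广西壮族自治区',
    '451000': '百色市',
    '451002': '右江区',
}


def resolve_area(code, table=_DEMO_CODES):
    """Return (province, city, county) names for a six-digit area code."""
    province = table.get(code[:2] + '0000')
    city = table.get(code[:4] + '00')
    county = table.get(code)
    return province, city, county


if __name__ == '__main__':
    # ('广西壮族自治区', '百色市', '右江区')
    print(resolve_area('451002'))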
PypiClean
/Hunabku_siiu-0.0.5.tar.gz/Hunabku_siiu-0.0.5/hunabku_siiu/endpoints/SIIU.py
from hunabku.HunabkuBase import HunabkuPluginBase, endpoint
from hunabku.Config import Config, Param
from pymongo import MongoClient
from elasticsearch import Elasticsearch, __version__ as es_version
from elasticsearch_dsl import Search
import time


class SIIU(HunabkuPluginBase):
    config = Config()
    config += Param(mdb_uri="mongodb://localhost:27017/",
                    doc="MongoDB string connection")
    config += Param(mdb_name="siiu",
                    doc="MongoDB name for SIIU")
    config += Param(es_uri="http://localhost:9200",
                    doc="Elastic Search url")
    config += Param(es_user="elastic",
                    doc="Elastic Search user")
    config += Param(es_pass="colav",
                    doc="Elastic Search password")
    config += Param(es_project_index="siiu_project",
                    doc="Elastic Search siiu project index name")

    def __init__(self, hunabku):
        super().__init__(hunabku)
        self.dbclient = MongoClient(self.config.mdb_uri)
        auth = (self.config.es_user, self.config.es_pass)
        # the basic-auth keyword argument changed name in elasticsearch-py 8.x
        if es_version[0] < 8:
            self.es = Elasticsearch(self.config.es_uri, http_auth=auth)
        else:
            self.es = Elasticsearch(self.config.es_uri, basic_auth=auth)

    def check_index(self):
        """Return a 500 response if the project index is missing, else None."""
        if not self.es.indices.exists(index=self.config.es_project_index):
            response = self.app.response_class(
                response=self.json.dumps(
                    {"msg": f"Internal error, index {self.config.es_project_index} not found in Elastic Search"}),
                status=500,
                mimetype='application/json'
            )
            return response
        return None

    @endpoint('/siiu/project', methods=['GET'])
    def siiu_project(self):
        """
        @api {get} /siiu/project Project
        @apiName Project
        @apiGroup SIIU
        @apiDescription Allows performing queries for projects; you can search by project id or by keyword.
                        The keyword search looks in the text fields NOMBRE_CORTO, NOMBRE_COMPLETO,
                        PALABRAS_CLAVES and descriptive_text.TEXTO_INGRESADO; large amounts of text were
                        indexed for this search.

        @apiParam {String} apikey Credential for authentication
        @apiParam {String} search keyword for text search.
        @apiParam {String} CODIGO project id.
        @apiParam {String} group_code Colciencias Group ID ex:"COL0008423"
        @apiParam {String} group_name name of the research group (returns the projects for this group)
        @apiParam {String} participant_name name of the project participant (returns the projects for this participant)
        @apiParam {String} participant_id id of the participant (returns the projects for this participant)

        @apiSuccess {Object} Registers from MongoDB in Json format.

        @apiError (Error 401) msg The HTTP 401 Unauthorized invalid authentication apikey for the target resource.
        @apiError (Error 400) msg Bad request, if the query is not right.

        @apiExample {curl} Example usage:
            # projects matching a keyword
            curl -i http://apis.colav.co/siiu/project?apikey=XXXX&search=keyword
            # a specific project
            curl -i http://apis.colav.co/siiu/project?apikey=XXXX&CODIGO=2013-86
            # projects for a given group id
            curl -i http://apis.colav.co/siiu/project?apikey=XXXX&group_code=COL0008423
            # projects for a given group name
            curl -i http://apis.colav.co/siiu/project?apikey=XXXX&group_name="psicologia cognitiva"
            # projects for a given participant name
            curl -i http://apis.colav.co/siiu/project?apikey=XXXX&participant_name="Diego Alejandro Restrepo Quintero"
            # projects for a given participant id
            curl -i http://apis.colav.co/siiu/project?apikey=XXXX&participant_id="xxxx"
        """
        if self.valid_apikey():
            keyword = self.request.args.get('search')
            codigo = self.request.args.get('CODIGO')
            grp_codigo = self.request.args.get('group_code')
            group_name = self.request.args.get('group_name')
            participant_name = self.request.args.get('participant_name')
            participant_id = self.request.args.get('participant_id')

            if keyword:
                check = self.check_index()
                if check is not None:
                    return check
                # full-text search over the indexed text fields
                body = {"query": {
                    "bool": {
                        "should": [
                            {"match": {"NOMBRE_CORTO": keyword}},
                            {"match": {"NOMBRE_COMPLETO": keyword}},
                            {"match": {"PALABRAS_CLAVES": keyword}},
                            {"match": {"descriptive_text.TEXTO_INGRESADO": keyword}}
                        ]
                    }
                }
                }
                # get the start time
                st = time.time()
                s = Search(using=self.es, index=self.config.es_project_index)
                s = s.update_from_dict(body)
                s = s.extra(track_total_hits=True)
                s.execute()
                data = [hit.to_dict() for hit in s.scan()]
                response = self.app.response_class(
                    response=self.json.dumps(data),
                    status=200,
                    mimetype='application/json'
                )
                # get the end time
                et = time.time()
                # get the execution time
                elapsed_time = et - st
                print(f'Search for "{keyword}" Execution time:',
                      elapsed_time, 'seconds')
                return response

            if codigo:
                data = list(self.dbclient[self.config.mdb_name]
                            ["project"].find({'CODIGO': codigo}, {'_id': 0, }))
                response = self.app.response_class(
                    response=self.json.dumps(data),
                    status=200,
                    mimetype='application/json'
                )
                return response

            if grp_codigo:
                data = list(self.dbclient[self.config.mdb_name]
                            ["project"].find({"project_participant.group.CODIGO_COLCIENCIAS": grp_codigo}, {"_id": 0}))
                response = self.app.response_class(
                    response=self.json.dumps(data),
                    status=200,
                    mimetype='application/json'
                )
                return response

            if participant_id:
                data = list(self.dbclient[self.config.mdb_name]
                            ["project"].find({"project_participant.PERSONA_NATURAL": participant_id}, {"_id": 0}))
                response = self.app.response_class(
                    response=self.json.dumps(data),
                    status=200,
                    mimetype='application/json'
                )
                return response

            if group_name:
                check = self.check_index()
                if check is not None:
                    return check
                body = {
                    "query": {
                        "bool": {
                            "must": [
                                {"match_phrase": {
                                    "project_participant.group.NOMBRE_COMPLETO": group_name}},
                            ]
                        }
                    }
                }
                # get the start time
                st = time.time()
                s = Search(using=self.es, index=self.config.es_project_index)
                s = s.update_from_dict(body)
                s = s.extra(track_total_hits=True)
                s.execute()
                data = [hit.to_dict() for hit in s.scan()]
                response = self.app.response_class(
                    response=self.json.dumps(data),
                    status=200,
                    mimetype='application/json'
                )
                # get the end time
                et = time.time()
                # get the execution time
                elapsed_time = et - st
                print(f'Search for "{group_name}" Execution time:',
                      elapsed_time, 'seconds')
                return response

            if participant_name:
                check = self.check_index()
                if check is not None:
                    return check
                body = {
                    "query": {
                        "bool": {
                            "must": [
                                {"match_phrase": {
                                    "project_participant.NOMBRE_COMPLETO": participant_name}},
                            ]
                        }
                    }
                }
                # get the start time
                st = time.time()
                s = Search(using=self.es, index=self.config.es_project_index)
                s = s.update_from_dict(body)
                s = s.extra(track_total_hits=True)
                s.execute()
                data = [hit.to_dict() for hit in s.scan()]
                response = self.app.response_class(
                    response=self.json.dumps(data),
                    status=200,
                    mimetype='application/json'
                )
                # get the end time
                et = time.time()
                # get the execution time
                elapsed_time = et - st
                print(f'Search for "{participant_name}" Execution time:',
                      elapsed_time, 'seconds')
                return response

            data = {
                "error": "Bad Request",
                "message": "invalid parameters, please select the right combination of parameters."}
            response = self.app.response_class(
                response=self.json.dumps(data),
                status=400,
                mimetype='application/json'
            )
            return response
        else:
            return self.apikey_error()

    @endpoint('/siiu/info', methods=['GET'])
    def config_end(self):
        """
        @api {get} /siiu/info Info
        @apiName Info
        @apiGroup SIIU
        @apiDescription Allows getting information about the projects, such as their ids (CODIGO)

        @apiParam {String} apikey Credential for authentication

        @apiSuccess {Object} Registers from MongoDB in Json format.

        @apiError (Error 401) msg The HTTP 401 Unauthorized invalid authentication apikey for the target resource.
        @apiError (Error 400) msg Bad request, if the query is not right.

        @apiExample {curl} Example usage:
            # ids (CODIGO) of all the projects
            curl -i http://apis.colav.co/siiu/info?apikey=XXXX
        """
        if not self.valid_apikey():
            # the documented 401 error for an invalid apikey
            return self.apikey_error()
        data = list(self.dbclient[self.config.mdb_name]
                    ["project"].find({}, {'_id': 0, 'CODIGO': 1}))
        response = self.app.response_class(
            response=self.json.dumps(data),
            status=200,
            mimetype='application/json'
        )
        return response
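

# --- Illustrative sketch (editor's example, not part of the plugin) ---
# Both endpoints above are plain GET endpoints, so a client only needs an HTTP
# library. `requests` is assumed to be available in the client environment; the
# base URL and apikey below are placeholders taken from the @apiExample blocks,
# and the helper names are hypothetical -- adapt them to your own client code.

import requests

BASE_URL = "http://apis.colav.co"
APIKEY = "XXXX"  # placeholder, as in the docstring examples above


def siiu_projects_by_keyword(keyword):
    """Query /siiu/project by keyword and return the decoded JSON list."""
    resp = requests.get(
        f"{BASE_URL}/siiu/project",
        params={"apikey": APIKEY, "search": keyword},
    )
    resp.raise_for_status()
    return resp.json()


def siiu_project_ids():
    """Return the list of project ids (CODIGO) exposed by /siiu/info."""
    resp = requests.get(f"{BASE_URL}/siiu/info", params={"apikey": APIKEY})
    resp.raise_for_status()
    return resp.json()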
PypiClean
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/simpleui-x/elementui/color-picker.js
module.exports = /******/ (function (modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if (installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function (exports, name, getter) { /******/ if (!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, {enumerable: true, get: getter}); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function (exports) { /******/ if (typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, {value: 'Module'}); /******/ } /******/ Object.defineProperty(exports, '__esModule', {value: true}); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like require /******/ __webpack_require__.t = function (value, mode) { /******/ if (mode & 1) value = __webpack_require__(value); /******/ if (mode & 8) return value; /******/ if ((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', {enumerable: true, value: value}); /******/ if (mode & 2 && typeof value != 'string') for (var key in value) __webpack_require__.d(ns, key, function (key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function (module) { /******/ var getter = module && module.__esModule ? 
/******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function (object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = "/dist/"; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 53); /******/ }) /************************************************************************/ /******/({ /***/ 0: /***/ (function (module, __webpack_exports__, __webpack_require__) { "use strict"; /* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function () { return normalizeComponent; }); /* globals __VUE_SSR_CONTEXT__ */ // IMPORTANT: Do NOT use ES2015 features in this file (except for modules). // This module is a runtime utility for cleaner component module output and will // be included in the final webpack user bundle. function normalizeComponent( scriptExports, render, staticRenderFns, functionalTemplate, injectStyles, scopeId, moduleIdentifier, /* server only */ shadowMode /* vue-cli only */ ) { // Vue.extend constructor export interop var options = typeof scriptExports === 'function' ? scriptExports.options : scriptExports // render functions if (render) { options.render = render options.staticRenderFns = staticRenderFns options._compiled = true } // functional template if (functionalTemplate) { options.functional = true } // scopedId if (scopeId) { options._scopeId = 'data-v-' + scopeId } var hook if (moduleIdentifier) { // server build hook = function (context) { // 2.3 injection context = context || // cached call (this.$vnode && this.$vnode.ssrContext) || // stateful (this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional // 2.2 with runInNewContext: true if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') { context = __VUE_SSR_CONTEXT__ } // inject component styles if (injectStyles) { injectStyles.call(this, context) } // register component module identifier for async chunk inferrence if (context && context._registeredComponents) { context._registeredComponents.add(moduleIdentifier) } } // used by ssr in case component is cached and beforeCreate // never gets called options._ssrRegister = hook } else if (injectStyles) { hook = shadowMode ? function () { injectStyles.call(this, this.$root.$options.shadowRoot) } : injectStyles } if (hook) { if (options.functional) { // for template-only hot-reload because in that case the render fn doesn't // go through the normalizer options._injectStyles = hook // register for functioal component in vue file var originalRender = options.render options.render = function renderWithStyleInjection(h, context) { hook.call(context) return originalRender(h, context) } } else { // inject component registration as beforeCreate hook var existing = options.beforeCreate options.beforeCreate = existing ? 
[].concat(existing, hook) : [hook] } } return { exports: scriptExports, options: options } } /***/ }), /***/ 11: /***/ (function (module, exports) { module.exports = require("element-ui/lib/input"); /***/ }), /***/ 12: /***/ (function (module, exports) { module.exports = require("element-ui/lib/utils/clickoutside"); /***/ }), /***/ 18: /***/ (function (module, exports) { module.exports = require("element-ui/lib/button"); /***/ }), /***/ 4: /***/ (function (module, exports) { module.exports = require("element-ui/lib/mixins/emitter"); /***/ }), /***/ 5: /***/ (function (module, exports) { module.exports = require("element-ui/lib/utils/vue-popper"); /***/ }), /***/ 53: /***/ (function (module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.r(__webpack_exports__); // CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/main.vue?vue&type=template&id=55c8ade7& var render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { directives: [ { name: "clickoutside", rawName: "v-clickoutside", value: _vm.hide, expression: "hide" } ], class: [ "el-color-picker", _vm.colorDisabled ? "is-disabled" : "", _vm.colorSize ? "el-color-picker--" + _vm.colorSize : "" ] }, [ _vm.colorDisabled ? _c("div", {staticClass: "el-color-picker__mask"}) : _vm._e(), _c( "div", { staticClass: "el-color-picker__trigger", on: {click: _vm.handleTrigger} }, [ _c( "span", { staticClass: "el-color-picker__color", class: {"is-alpha": _vm.showAlpha} }, [ _c("span", { staticClass: "el-color-picker__color-inner", style: { backgroundColor: _vm.displayedColor } }), !_vm.value && !_vm.showPanelColor ? _c("span", { staticClass: "el-color-picker__empty el-icon-close" }) : _vm._e() ] ), _c("span", { directives: [ { name: "show", rawName: "v-show", value: _vm.value || _vm.showPanelColor, expression: "value || showPanelColor" } ], staticClass: "el-color-picker__icon el-icon-arrow-down" }) ] ), _c("picker-dropdown", { ref: "dropdown", class: ["el-color-picker__panel", _vm.popperClass || ""], attrs: { color: _vm.color, "show-alpha": _vm.showAlpha, predefine: _vm.predefine }, on: {pick: _vm.confirmValue, clear: _vm.clearValue}, model: { value: _vm.showPicker, callback: function ($$v) { _vm.showPicker = $$v }, expression: "showPicker" } }) ], 1 ) } var staticRenderFns = [] render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/main.vue?vue&type=template&id=55c8ade7& // CONCATENATED MODULE: ./packages/color-picker/src/color.js var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var hsv2hsl = function hsv2hsl(hue, sat, val) { return [hue, sat * val / ((hue = (2 - sat) * val) < 1 ? 
hue : 2 - hue) || 0, hue / 2]; }; // Need to handle 1.0 as 100%, since once it is a number, there is no difference between it and 1 // <http://stackoverflow.com/questions/7422072/javascript-how-to-detect-number-as-a-decimal-including-1-0> var isOnePointZero = function isOnePointZero(n) { return typeof n === 'string' && n.indexOf('.') !== -1 && parseFloat(n) === 1; }; var isPercentage = function isPercentage(n) { return typeof n === 'string' && n.indexOf('%') !== -1; }; // Take input from [0, n] and return it as [0, 1] var bound01 = function bound01(value, max) { if (isOnePointZero(value)) value = '100%'; var processPercent = isPercentage(value); value = Math.min(max, Math.max(0, parseFloat(value))); // Automatically convert percentage into number if (processPercent) { value = parseInt(value * max, 10) / 100; } // Handle floating point rounding errors if (Math.abs(value - max) < 0.000001) { return 1; } // Convert into [0, 1] range if it isn't already return value % max / parseFloat(max); }; var INT_HEX_MAP = {10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F'}; var toHex = function toHex(_ref) { var r = _ref.r, g = _ref.g, b = _ref.b; var hexOne = function hexOne(value) { value = Math.min(Math.round(value), 255); var high = Math.floor(value / 16); var low = value % 16; return '' + (INT_HEX_MAP[high] || high) + (INT_HEX_MAP[low] || low); }; if (isNaN(r) || isNaN(g) || isNaN(b)) return ''; return '#' + hexOne(r) + hexOne(g) + hexOne(b); }; var HEX_INT_MAP = {A: 10, B: 11, C: 12, D: 13, E: 14, F: 15}; var parseHexChannel = function parseHexChannel(hex) { if (hex.length === 2) { return (HEX_INT_MAP[hex[0].toUpperCase()] || +hex[0]) * 16 + (HEX_INT_MAP[hex[1].toUpperCase()] || +hex[1]); } return HEX_INT_MAP[hex[1].toUpperCase()] || +hex[1]; }; var hsl2hsv = function hsl2hsv(hue, sat, light) { sat = sat / 100; light = light / 100; var smin = sat; var lmin = Math.max(light, 0.01); var sv = void 0; var v = void 0; light *= 2; sat *= light <= 1 ? light : 2 - light; smin *= lmin <= 1 ? lmin : 2 - lmin; v = (light + sat) / 2; sv = light === 0 ? 2 * smin / (lmin + smin) : 2 * sat / (light + sat); return { h: hue, s: sv * 100, v: v * 100 }; }; // `rgbToHsv` // Converts an RGB color value to HSV // *Assumes:* r, g, and b are contained in the set [0, 255] or [0, 1] // *Returns:* { h, s, v } in [0,1] var rgb2hsv = function rgb2hsv(r, g, b) { r = bound01(r, 255); g = bound01(g, 255); b = bound01(b, 255); var max = Math.max(r, g, b); var min = Math.min(r, g, b); var h = void 0, s = void 0; var v = max; var d = max - min; s = max === 0 ? 0 : d / max; if (max === min) { h = 0; // achromatic } else { switch (max) { case r: h = (g - b) / d + (g < b ? 6 : 0); break; case g: h = (b - r) / d + 2; break; case b: h = (r - g) / d + 4; break; } h /= 6; } return {h: h * 360, s: s * 100, v: v * 100}; }; // `hsvToRgb` // Converts an HSV color value to RGB. 
// *Assumes:* h is contained in [0, 1] or [0, 360] and s and v are contained in [0, 1] or [0, 100] // *Returns:* { r, g, b } in the set [0, 255] var hsv2rgb = function hsv2rgb(h, s, v) { h = bound01(h, 360) * 6; s = bound01(s, 100); v = bound01(v, 100); var i = Math.floor(h); var f = h - i; var p = v * (1 - s); var q = v * (1 - f * s); var t = v * (1 - (1 - f) * s); var mod = i % 6; var r = [v, q, p, p, t, v][mod]; var g = [t, v, v, q, p, p][mod]; var b = [p, p, t, v, v, q][mod]; return { r: Math.round(r * 255), g: Math.round(g * 255), b: Math.round(b * 255) }; }; var Color = function () { function Color(options) { _classCallCheck(this, Color); this._hue = 0; this._saturation = 100; this._value = 100; this._alpha = 100; this.enableAlpha = false; this.format = 'hex'; this.value = ''; options = options || {}; for (var option in options) { if (options.hasOwnProperty(option)) { this[option] = options[option]; } } this.doOnChange(); } Color.prototype.set = function set(prop, value) { if (arguments.length === 1 && (typeof prop === 'undefined' ? 'undefined' : _typeof(prop)) === 'object') { for (var p in prop) { if (prop.hasOwnProperty(p)) { this.set(p, prop[p]); } } return; } this['_' + prop] = value; this.doOnChange(); }; Color.prototype.get = function get(prop) { return this['_' + prop]; }; Color.prototype.toRgb = function toRgb() { return hsv2rgb(this._hue, this._saturation, this._value); }; Color.prototype.fromString = function fromString(value) { var _this = this; if (!value) { this._hue = 0; this._saturation = 100; this._value = 100; this.doOnChange(); return; } var fromHSV = function fromHSV(h, s, v) { _this._hue = Math.max(0, Math.min(360, h)); _this._saturation = Math.max(0, Math.min(100, s)); _this._value = Math.max(0, Math.min(100, v)); _this.doOnChange(); }; if (value.indexOf('hsl') !== -1) { var parts = value.replace(/hsla|hsl|\(|\)/gm, '').split(/\s|,/g).filter(function (val) { return val !== ''; }).map(function (val, index) { return index > 2 ? parseFloat(val) : parseInt(val, 10); }); if (parts.length === 4) { this._alpha = Math.floor(parseFloat(parts[3]) * 100); } else if (parts.length === 3) { this._alpha = 100; } if (parts.length >= 3) { var _hsl2hsv = hsl2hsv(parts[0], parts[1], parts[2]), h = _hsl2hsv.h, s = _hsl2hsv.s, v = _hsl2hsv.v; fromHSV(h, s, v); } } else if (value.indexOf('hsv') !== -1) { var _parts = value.replace(/hsva|hsv|\(|\)/gm, '').split(/\s|,/g).filter(function (val) { return val !== ''; }).map(function (val, index) { return index > 2 ? parseFloat(val) : parseInt(val, 10); }); if (_parts.length === 4) { this._alpha = Math.floor(parseFloat(_parts[3]) * 100); } else if (_parts.length === 3) { this._alpha = 100; } if (_parts.length >= 3) { fromHSV(_parts[0], _parts[1], _parts[2]); } } else if (value.indexOf('rgb') !== -1) { var _parts2 = value.replace(/rgba|rgb|\(|\)/gm, '').split(/\s|,/g).filter(function (val) { return val !== ''; }).map(function (val, index) { return index > 2 ? 
parseFloat(val) : parseInt(val, 10); }); if (_parts2.length === 4) { this._alpha = Math.floor(parseFloat(_parts2[3]) * 100); } else if (_parts2.length === 3) { this._alpha = 100; } if (_parts2.length >= 3) { var _rgb2hsv = rgb2hsv(_parts2[0], _parts2[1], _parts2[2]), _h = _rgb2hsv.h, _s = _rgb2hsv.s, _v = _rgb2hsv.v; fromHSV(_h, _s, _v); } } else if (value.indexOf('#') !== -1) { var hex = value.replace('#', '').trim(); if (!/^(?:[0-9a-fA-F]{3}){1,2}$/.test(hex)) return; var r = void 0, g = void 0, b = void 0; if (hex.length === 3) { r = parseHexChannel(hex[0] + hex[0]); g = parseHexChannel(hex[1] + hex[1]); b = parseHexChannel(hex[2] + hex[2]); } else if (hex.length === 6 || hex.length === 8) { r = parseHexChannel(hex.substring(0, 2)); g = parseHexChannel(hex.substring(2, 4)); b = parseHexChannel(hex.substring(4, 6)); } if (hex.length === 8) { this._alpha = Math.floor(parseHexChannel(hex.substring(6)) / 255 * 100); } else if (hex.length === 3 || hex.length === 6) { this._alpha = 100; } var _rgb2hsv2 = rgb2hsv(r, g, b), _h2 = _rgb2hsv2.h, _s2 = _rgb2hsv2.s, _v2 = _rgb2hsv2.v; fromHSV(_h2, _s2, _v2); } }; Color.prototype.compare = function compare(color) { return Math.abs(color._hue - this._hue) < 2 && Math.abs(color._saturation - this._saturation) < 1 && Math.abs(color._value - this._value) < 1 && Math.abs(color._alpha - this._alpha) < 1; }; Color.prototype.doOnChange = function doOnChange() { var _hue = this._hue, _saturation = this._saturation, _value = this._value, _alpha = this._alpha, format = this.format; if (this.enableAlpha) { switch (format) { case 'hsl': var hsl = hsv2hsl(_hue, _saturation / 100, _value / 100); this.value = 'hsla(' + _hue + ', ' + Math.round(hsl[1] * 100) + '%, ' + Math.round(hsl[2] * 100) + '%, ' + _alpha / 100 + ')'; break; case 'hsv': this.value = 'hsva(' + _hue + ', ' + Math.round(_saturation) + '%, ' + Math.round(_value) + '%, ' + _alpha / 100 + ')'; break; default: var _hsv2rgb = hsv2rgb(_hue, _saturation, _value), r = _hsv2rgb.r, g = _hsv2rgb.g, b = _hsv2rgb.b; this.value = 'rgba(' + r + ', ' + g + ', ' + b + ', ' + _alpha / 100 + ')'; } } else { switch (format) { case 'hsl': var _hsl = hsv2hsl(_hue, _saturation / 100, _value / 100); this.value = 'hsl(' + _hue + ', ' + Math.round(_hsl[1] * 100) + '%, ' + Math.round(_hsl[2] * 100) + '%)'; break; case 'hsv': this.value = 'hsv(' + _hue + ', ' + Math.round(_saturation) + '%, ' + Math.round(_value) + '%)'; break; case 'rgb': var _hsv2rgb2 = hsv2rgb(_hue, _saturation, _value), _r = _hsv2rgb2.r, _g = _hsv2rgb2.g, _b = _hsv2rgb2.b; this.value = 'rgb(' + _r + ', ' + _g + ', ' + _b + ')'; break; default: this.value = toHex(hsv2rgb(_hue, _saturation, _value)); } } }; return Color; }(); /* harmony default export */ var src_color = (Color); // CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/picker-dropdown.vue?vue&type=template&id=06601625& var picker_dropdownvue_type_template_id_06601625_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "transition", {attrs: {name: "el-zoom-in-top"}, on: {"after-leave": _vm.doDestroy}}, [ _c( "div", { directives: [ { name: "show", rawName: "v-show", value: _vm.showPopper, expression: "showPopper" } ], staticClass: "el-color-dropdown" }, [ _c( "div", {staticClass: "el-color-dropdown__main-wrapper"}, [ _c("hue-slider", { ref: "hue", staticStyle: {float: "right"}, 
attrs: {color: _vm.color, vertical: ""} }), _c("sv-panel", {ref: "sl", attrs: {color: _vm.color}}) ], 1 ), _vm.showAlpha ? _c("alpha-slider", {ref: "alpha", attrs: {color: _vm.color}}) : _vm._e(), _vm.predefine ? _c("predefine", { attrs: {color: _vm.color, colors: _vm.predefine} }) : _vm._e(), _c( "div", {staticClass: "el-color-dropdown__btns"}, [ _c( "span", {staticClass: "el-color-dropdown__value"}, [ _c("el-input", { attrs: {"validate-event": false, size: "mini"}, on: {blur: _vm.handleConfirm}, nativeOn: { keyup: function ($event) { if ( !("button" in $event) && _vm._k( $event.keyCode, "enter", 13, $event.key, "Enter" ) ) { return null } return _vm.handleConfirm($event) } }, model: { value: _vm.customInput, callback: function ($$v) { _vm.customInput = $$v }, expression: "customInput" } }) ], 1 ), _c( "el-button", { staticClass: "el-color-dropdown__link-btn", attrs: {size: "mini", type: "text"}, on: { click: function ($event) { _vm.$emit("clear") } } }, [ _vm._v( "\n " + _vm._s(_vm.t("el.colorpicker.clear")) + "\n " ) ] ), _c( "el-button", { staticClass: "el-color-dropdown__btn", attrs: {plain: "", size: "mini"}, on: {click: _vm.confirmValue} }, [ _vm._v( "\n " + _vm._s(_vm.t("el.colorpicker.confirm")) + "\n " ) ] ) ], 1 ) ], 1 ) ] ) } var picker_dropdownvue_type_template_id_06601625_staticRenderFns = [] picker_dropdownvue_type_template_id_06601625_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/picker-dropdown.vue?vue&type=template&id=06601625& // CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/sv-panel.vue?vue&type=template&id=d8583596& var sv_panelvue_type_template_id_d8583596_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { staticClass: "el-color-svpanel", style: { backgroundColor: _vm.background } }, [ _c("div", {staticClass: "el-color-svpanel__white"}), _c("div", {staticClass: "el-color-svpanel__black"}), _c( "div", { staticClass: "el-color-svpanel__cursor", style: { top: _vm.cursorTop + "px", left: _vm.cursorLeft + "px" } }, [_c("div")] ) ] ) } var sv_panelvue_type_template_id_d8583596_staticRenderFns = [] sv_panelvue_type_template_id_d8583596_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/sv-panel.vue?vue&type=template&id=d8583596& // EXTERNAL MODULE: external "vue" var external_vue_ = __webpack_require__(7); var external_vue_default = /*#__PURE__*/__webpack_require__.n(external_vue_); // CONCATENATED MODULE: ./packages/color-picker/src/draggable.js var isDragging = false; /* harmony default export */ var draggable = (function (element, options) { if (external_vue_default.a.prototype.$isServer) return; var moveFn = function moveFn(event) { if (options.drag) { options.drag(event); } }; var upFn = function upFn(event) { document.removeEventListener('mousemove', moveFn); document.removeEventListener('mouseup', upFn); document.onselectstart = null; document.ondragstart = null; isDragging = false; if (options.end) { options.end(event); } }; element.addEventListener('mousedown', function (event) { if (isDragging) return; document.onselectstart = function () { return false; }; document.ondragstart = function () { return false; }; document.addEventListener('mousemove', moveFn); document.addEventListener('mouseup', upFn); isDragging = true; if (options.start) { 
options.start(event); } }); }); // CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/sv-panel.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // /* harmony default export */ var sv_panelvue_type_script_lang_js_ = ({ name: 'el-sl-panel', props: { color: { required: true } }, computed: { colorValue: function colorValue() { var hue = this.color.get('hue'); var value = this.color.get('value'); return {hue: hue, value: value}; } }, watch: { colorValue: function colorValue() { this.update(); } }, methods: { update: function update() { var saturation = this.color.get('saturation'); var value = this.color.get('value'); var el = this.$el; var width = el.clientWidth, height = el.clientHeight; this.cursorLeft = saturation * width / 100; this.cursorTop = (100 - value) * height / 100; this.background = 'hsl(' + this.color.get('hue') + ', 100%, 50%)'; }, handleDrag: function handleDrag(event) { var el = this.$el; var rect = el.getBoundingClientRect(); var left = event.clientX - rect.left; var top = event.clientY - rect.top; left = Math.max(0, left); left = Math.min(left, rect.width); top = Math.max(0, top); top = Math.min(top, rect.height); this.cursorLeft = left; this.cursorTop = top; this.color.set({ saturation: left / rect.width * 100, value: 100 - top / rect.height * 100 }); } }, mounted: function mounted() { var _this = this; draggable(this.$el, { drag: function drag(event) { _this.handleDrag(event); }, end: function end(event) { _this.handleDrag(event); } }); this.update(); }, data: function data() { return { cursorTop: 0, cursorLeft: 0, background: 'hsl(0, 100%, 50%)' }; } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/sv-panel.vue?vue&type=script&lang=js& /* harmony default export */ var components_sv_panelvue_type_script_lang_js_ = (sv_panelvue_type_script_lang_js_); // EXTERNAL MODULE: ./node_modules/[email protected]@vue-loader/lib/runtime/componentNormalizer.js var componentNormalizer = __webpack_require__(0); // CONCATENATED MODULE: ./packages/color-picker/src/components/sv-panel.vue /* normalize component */ var component = Object(componentNormalizer["a" /* default */])( components_sv_panelvue_type_script_lang_js_, sv_panelvue_type_template_id_d8583596_render, sv_panelvue_type_template_id_d8583596_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var api; } component.options.__file = "packages/color-picker/src/components/sv-panel.vue" /* harmony default export */ var sv_panel = (component.exports); // CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/hue-slider.vue?vue&type=template&id=5cdc43b1& var hue_slidervue_type_template_id_5cdc43b1_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { staticClass: "el-color-hue-slider", class: {"is-vertical": _vm.vertical} }, [ _c("div", { ref: "bar", staticClass: "el-color-hue-slider__bar", on: {click: _vm.handleClick} }), _c("div", { ref: "thumb", staticClass: "el-color-hue-slider__thumb", style: { left: _vm.thumbLeft + "px", top: _vm.thumbTop + "px" } }) ] ) } var hue_slidervue_type_template_id_5cdc43b1_staticRenderFns = [] hue_slidervue_type_template_id_5cdc43b1_render._withStripped = true // CONCATENATED MODULE: 
./packages/color-picker/src/components/hue-slider.vue?vue&type=template&id=5cdc43b1& // CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/hue-slider.vue?vue&type=script&lang=js& // // // // // // // // // // // // // /* harmony default export */ var hue_slidervue_type_script_lang_js_ = ({ name: 'el-color-hue-slider', props: { color: { required: true }, vertical: Boolean }, data: function data() { return { thumbLeft: 0, thumbTop: 0 }; }, computed: { hueValue: function hueValue() { var hue = this.color.get('hue'); return hue; } }, watch: { hueValue: function hueValue() { this.update(); } }, methods: { handleClick: function handleClick(event) { var thumb = this.$refs.thumb; var target = event.target; if (target !== thumb) { this.handleDrag(event); } }, handleDrag: function handleDrag(event) { var rect = this.$el.getBoundingClientRect(); var thumb = this.$refs.thumb; var hue = void 0; if (!this.vertical) { var left = event.clientX - rect.left; left = Math.min(left, rect.width - thumb.offsetWidth / 2); left = Math.max(thumb.offsetWidth / 2, left); hue = Math.round((left - thumb.offsetWidth / 2) / (rect.width - thumb.offsetWidth) * 360); } else { var top = event.clientY - rect.top; top = Math.min(top, rect.height - thumb.offsetHeight / 2); top = Math.max(thumb.offsetHeight / 2, top); hue = Math.round((top - thumb.offsetHeight / 2) / (rect.height - thumb.offsetHeight) * 360); } this.color.set('hue', hue); }, getThumbLeft: function getThumbLeft() { if (this.vertical) return 0; var el = this.$el; var hue = this.color.get('hue'); if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(hue * (el.offsetWidth - thumb.offsetWidth / 2) / 360); }, getThumbTop: function getThumbTop() { if (!this.vertical) return 0; var el = this.$el; var hue = this.color.get('hue'); if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(hue * (el.offsetHeight - thumb.offsetHeight / 2) / 360); }, update: function update() { this.thumbLeft = this.getThumbLeft(); this.thumbTop = this.getThumbTop(); } }, mounted: function mounted() { var _this = this; var _$refs = this.$refs, bar = _$refs.bar, thumb = _$refs.thumb; var dragConfig = { drag: function drag(event) { _this.handleDrag(event); }, end: function end(event) { _this.handleDrag(event); } }; draggable(bar, dragConfig); draggable(thumb, dragConfig); this.update(); } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/hue-slider.vue?vue&type=script&lang=js& /* harmony default export */ var components_hue_slidervue_type_script_lang_js_ = (hue_slidervue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/hue-slider.vue /* normalize component */ var hue_slider_component = Object(componentNormalizer["a" /* default */])( components_hue_slidervue_type_script_lang_js_, hue_slidervue_type_template_id_5cdc43b1_render, hue_slidervue_type_template_id_5cdc43b1_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var hue_slider_api; } hue_slider_component.options.__file = "packages/color-picker/src/components/hue-slider.vue" /* harmony default export */ var hue_slider = (hue_slider_component.exports); // CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/alpha-slider.vue?vue&type=template&id=068c66cb& 
var alpha_slidervue_type_template_id_068c66cb_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c( "div", { staticClass: "el-color-alpha-slider", class: {"is-vertical": _vm.vertical} }, [ _c("div", { ref: "bar", staticClass: "el-color-alpha-slider__bar", style: { background: _vm.background }, on: {click: _vm.handleClick} }), _c("div", { ref: "thumb", staticClass: "el-color-alpha-slider__thumb", style: { left: _vm.thumbLeft + "px", top: _vm.thumbTop + "px" } }) ] ) } var alpha_slidervue_type_template_id_068c66cb_staticRenderFns = [] alpha_slidervue_type_template_id_068c66cb_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/alpha-slider.vue?vue&type=template&id=068c66cb& // CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/alpha-slider.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // // // /* harmony default export */ var alpha_slidervue_type_script_lang_js_ = ({ name: 'el-color-alpha-slider', props: { color: { required: true }, vertical: Boolean }, watch: { 'color._alpha': function color_alpha() { this.update(); }, 'color.value': function colorValue() { this.update(); } }, methods: { handleClick: function handleClick(event) { var thumb = this.$refs.thumb; var target = event.target; if (target !== thumb) { this.handleDrag(event); } }, handleDrag: function handleDrag(event) { var rect = this.$el.getBoundingClientRect(); var thumb = this.$refs.thumb; if (!this.vertical) { var left = event.clientX - rect.left; left = Math.max(thumb.offsetWidth / 2, left); left = Math.min(left, rect.width - thumb.offsetWidth / 2); this.color.set('alpha', Math.round((left - thumb.offsetWidth / 2) / (rect.width - thumb.offsetWidth) * 100)); } else { var top = event.clientY - rect.top; top = Math.max(thumb.offsetHeight / 2, top); top = Math.min(top, rect.height - thumb.offsetHeight / 2); this.color.set('alpha', Math.round((top - thumb.offsetHeight / 2) / (rect.height - thumb.offsetHeight) * 100)); } }, getThumbLeft: function getThumbLeft() { if (this.vertical) return 0; var el = this.$el; var alpha = this.color._alpha; if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(alpha * (el.offsetWidth - thumb.offsetWidth / 2) / 100); }, getThumbTop: function getThumbTop() { if (!this.vertical) return 0; var el = this.$el; var alpha = this.color._alpha; if (!el) return 0; var thumb = this.$refs.thumb; return Math.round(alpha * (el.offsetHeight - thumb.offsetHeight / 2) / 100); }, getBackground: function getBackground() { if (this.color && this.color.value) { var _color$toRgb = this.color.toRgb(), r = _color$toRgb.r, g = _color$toRgb.g, b = _color$toRgb.b; return 'linear-gradient(to right, rgba(' + r + ', ' + g + ', ' + b + ', 0) 0%, rgba(' + r + ', ' + g + ', ' + b + ', 1) 100%)'; } return null; }, update: function update() { this.thumbLeft = this.getThumbLeft(); this.thumbTop = this.getThumbTop(); this.background = this.getBackground(); } }, data: function data() { return { thumbLeft: 0, thumbTop: 0, background: null }; }, mounted: function mounted() { var _this = this; var _$refs = this.$refs, bar = _$refs.bar, thumb = _$refs.thumb; var dragConfig = { drag: function drag(event) { _this.handleDrag(event); }, end: function end(event) { _this.handleDrag(event); } }; draggable(bar, dragConfig); draggable(thumb, dragConfig); this.update(); } }); // CONCATENATED 
MODULE: ./packages/color-picker/src/components/alpha-slider.vue?vue&type=script&lang=js& /* harmony default export */ var components_alpha_slidervue_type_script_lang_js_ = (alpha_slidervue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/alpha-slider.vue /* normalize component */ var alpha_slider_component = Object(componentNormalizer["a" /* default */])( components_alpha_slidervue_type_script_lang_js_, alpha_slidervue_type_template_id_068c66cb_render, alpha_slidervue_type_template_id_068c66cb_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var alpha_slider_api; } alpha_slider_component.options.__file = "packages/color-picker/src/components/alpha-slider.vue" /* harmony default export */ var alpha_slider = (alpha_slider_component.exports); // CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/predefine.vue?vue&type=template&id=06e03093& var predefinevue_type_template_id_06e03093_render = function () { var _vm = this var _h = _vm.$createElement var _c = _vm._self._c || _h return _c("div", {staticClass: "el-color-predefine"}, [ _c( "div", {staticClass: "el-color-predefine__colors"}, _vm._l(_vm.rgbaColors, function (item, index) { return _c( "div", { key: _vm.colors[index], staticClass: "el-color-predefine__color-selector", class: {selected: item.selected, "is-alpha": item._alpha < 100}, on: { click: function ($event) { _vm.handleSelect(index) } } }, [_c("div", {style: {"background-color": item.value}})] ) }), 0 ) ]) } var predefinevue_type_template_id_06e03093_staticRenderFns = [] predefinevue_type_template_id_06e03093_render._withStripped = true // CONCATENATED MODULE: ./packages/color-picker/src/components/predefine.vue?vue&type=template&id=06e03093& // CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/predefine.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // /* harmony default export */ var predefinevue_type_script_lang_js_ = ({ props: { colors: {type: Array, required: true}, color: {required: true} }, data: function data() { return { rgbaColors: this.parseColors(this.colors, this.color) }; }, methods: { handleSelect: function handleSelect(index) { this.color.fromString(this.colors[index]); }, parseColors: function parseColors(colors, color) { return colors.map(function (value) { var c = new src_color(); c.enableAlpha = true; c.format = 'rgba'; c.fromString(value); c.selected = c.value === color.value; return c; }); } }, watch: { '$parent.currentColor': function $parentCurrentColor(val) { var color = new src_color(); color.fromString(val); this.rgbaColors.forEach(function (item) { item.selected = color.compare(item); }); }, colors: function colors(newVal) { this.rgbaColors = this.parseColors(newVal, this.color); }, color: function color(newVal) { this.rgbaColors = this.parseColors(this.colors, newVal); } } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/predefine.vue?vue&type=script&lang=js& /* harmony default export */ var components_predefinevue_type_script_lang_js_ = (predefinevue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/predefine.vue /* normalize component */ var predefine_component = Object(componentNormalizer["a" /* default */])( 
components_predefinevue_type_script_lang_js_, predefinevue_type_template_id_06e03093_render, predefinevue_type_template_id_06e03093_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var predefine_api; } predefine_component.options.__file = "packages/color-picker/src/components/predefine.vue" /* harmony default export */ var predefine = (predefine_component.exports); // EXTERNAL MODULE: external "element-ui/lib/utils/vue-popper" var vue_popper_ = __webpack_require__(5); var vue_popper_default = /*#__PURE__*/__webpack_require__.n(vue_popper_); // EXTERNAL MODULE: external "element-ui/lib/mixins/locale" var locale_ = __webpack_require__(6); var locale_default = /*#__PURE__*/__webpack_require__.n(locale_); // EXTERNAL MODULE: external "element-ui/lib/input" var input_ = __webpack_require__(11); var input_default = /*#__PURE__*/__webpack_require__.n(input_); // EXTERNAL MODULE: external "element-ui/lib/button" var button_ = __webpack_require__(18); var button_default = /*#__PURE__*/__webpack_require__.n(button_); // CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/components/picker-dropdown.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // /* harmony default export */ var picker_dropdownvue_type_script_lang_js_ = ({ name: 'el-color-picker-dropdown', mixins: [vue_popper_default.a, locale_default.a], components: { SvPanel: sv_panel, HueSlider: hue_slider, AlphaSlider: alpha_slider, ElInput: input_default.a, ElButton: button_default.a, Predefine: predefine }, props: { color: { required: true }, showAlpha: Boolean, predefine: Array }, data: function data() { return { customInput: '' }; }, computed: { currentColor: function currentColor() { var parent = this.$parent; return !parent.value && !parent.showPanelColor ? 
'' : parent.color.value; } }, methods: { confirmValue: function confirmValue() { this.$emit('pick'); }, handleConfirm: function handleConfirm() { this.color.fromString(this.customInput); } }, mounted: function mounted() { this.$parent.popperElm = this.popperElm = this.$el; this.referenceElm = this.$parent.$el; }, watch: { showPopper: function showPopper(val) { var _this = this; if (val === true) { this.$nextTick(function () { var _$refs = _this.$refs, sl = _$refs.sl, hue = _$refs.hue, alpha = _$refs.alpha; sl && sl.update(); hue && hue.update(); alpha && alpha.update(); }); } }, currentColor: { immediate: true, handler: function handler(val) { this.customInput = val; } } } }); // CONCATENATED MODULE: ./packages/color-picker/src/components/picker-dropdown.vue?vue&type=script&lang=js& /* harmony default export */ var components_picker_dropdownvue_type_script_lang_js_ = (picker_dropdownvue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/components/picker-dropdown.vue /* normalize component */ var picker_dropdown_component = Object(componentNormalizer["a" /* default */])( components_picker_dropdownvue_type_script_lang_js_, picker_dropdownvue_type_template_id_06601625_render, picker_dropdownvue_type_template_id_06601625_staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var picker_dropdown_api; } picker_dropdown_component.options.__file = "packages/color-picker/src/components/picker-dropdown.vue" /* harmony default export */ var picker_dropdown = (picker_dropdown_component.exports); // EXTERNAL MODULE: external "element-ui/lib/utils/clickoutside" var clickoutside_ = __webpack_require__(12); var clickoutside_default = /*#__PURE__*/__webpack_require__.n(clickoutside_); // EXTERNAL MODULE: external "element-ui/lib/mixins/emitter" var emitter_ = __webpack_require__(4); var emitter_default = /*#__PURE__*/__webpack_require__.n(emitter_); // CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/color-picker/src/main.vue?vue&type=script&lang=js& // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // // /* harmony default export */ var mainvue_type_script_lang_js_ = ({ name: 'ElColorPicker', mixins: [emitter_default.a], props: { value: String, showAlpha: Boolean, colorFormat: String, disabled: Boolean, size: String, popperClass: String, predefine: Array }, inject: { elForm: { default: '' }, elFormItem: { default: '' } }, directives: {Clickoutside: clickoutside_default.a}, computed: { displayedColor: function displayedColor() { if (!this.value && !this.showPanelColor) { return 'transparent'; } return this.displayedRgb(this.color, this.showAlpha); }, _elFormItemSize: function _elFormItemSize() { return (this.elFormItem || {}).elFormItemSize; }, colorSize: function colorSize() { return this.size || this._elFormItemSize || (this.$ELEMENT || {}).size; }, colorDisabled: function colorDisabled() { return this.disabled || (this.elForm || {}).disabled; } }, watch: { value: function value(val) { if (!val) { this.showPanelColor = false; } else if (val && val !== this.color.value) { this.color.fromString(val); } }, color: { deep: true, handler: function handler() { this.showPanelColor = true; } }, displayedColor: function displayedColor(val) { if (!this.showPicker) return; var currentValueColor = new src_color({ enableAlpha: this.showAlpha, format: this.colorFormat }); currentValueColor.fromString(this.value); var currentValueColorRgb 
= this.displayedRgb(currentValueColor, this.showAlpha); if (val !== currentValueColorRgb) { this.$emit('active-change', val); } } }, methods: { handleTrigger: function handleTrigger() { if (this.colorDisabled) return; this.showPicker = !this.showPicker; }, confirmValue: function confirmValue() { var value = this.color.value; this.$emit('input', value); this.$emit('change', value); this.dispatch('ElFormItem', 'el.form.change', value); this.showPicker = false; }, clearValue: function clearValue() { this.$emit('input', null); this.$emit('change', null); if (this.value !== null) { this.dispatch('ElFormItem', 'el.form.change', null); } this.showPanelColor = false; this.showPicker = false; this.resetColor(); }, hide: function hide() { this.showPicker = false; this.resetColor(); }, resetColor: function resetColor() { var _this = this; this.$nextTick(function (_) { if (_this.value) { _this.color.fromString(_this.value); } else { _this.showPanelColor = false; } }); }, displayedRgb: function displayedRgb(color, showAlpha) { if (!(color instanceof src_color)) { throw Error('color should be instance of Color Class'); } var _color$toRgb = color.toRgb(), r = _color$toRgb.r, g = _color$toRgb.g, b = _color$toRgb.b; return showAlpha ? 'rgba(' + r + ', ' + g + ', ' + b + ', ' + color.get('alpha') / 100 + ')' : 'rgb(' + r + ', ' + g + ', ' + b + ')'; } }, mounted: function mounted() { var value = this.value; if (value) { this.color.fromString(value); } this.popperElm = this.$refs.dropdown.$el; }, data: function data() { var color = new src_color({ enableAlpha: this.showAlpha, format: this.colorFormat }); return { color: color, showPicker: false, showPanelColor: false }; }, components: { PickerDropdown: picker_dropdown } }); // CONCATENATED MODULE: ./packages/color-picker/src/main.vue?vue&type=script&lang=js& /* harmony default export */ var src_mainvue_type_script_lang_js_ = (mainvue_type_script_lang_js_); // CONCATENATED MODULE: ./packages/color-picker/src/main.vue /* normalize component */ var main_component = Object(componentNormalizer["a" /* default */])( src_mainvue_type_script_lang_js_, render, staticRenderFns, false, null, null, null ) /* hot reload */ if (false) { var main_api; } main_component.options.__file = "packages/color-picker/src/main.vue" /* harmony default export */ var main = (main_component.exports); // CONCATENATED MODULE: ./packages/color-picker/index.js /* istanbul ignore next */ main.install = function (Vue) { Vue.component(main.name, main); }; /* harmony default export */ var color_picker = __webpack_exports__["default"] = (main); /***/ }), /***/ 6: /***/ (function (module, exports) { module.exports = require("element-ui/lib/mixins/locale"); /***/ }), /***/ 7: /***/ (function (module, exports) { module.exports = require("vue"); /***/ }) /******/ });
PypiClean
/Flask-Reuploaded-1.3.0.tar.gz/Flask-Reuploaded-1.3.0/README.rst
.. image:: https://github.com/jugmac00/flask-reuploaded/workflows/CI/badge.svg?branch=master
   :target: https://github.com/jugmac00/flask-reuploaded/actions?workflow=CI
   :alt: CI Status

.. image:: https://coveralls.io/repos/github/jugmac00/flask-reuploaded/badge.svg?branch=master
   :target: https://coveralls.io/github/jugmac00/flask-reuploaded?branch=master

.. image:: https://img.shields.io/pypi/v/flask-reuploaded
   :alt: PyPI
   :target: https://github.com/jugmac00/flask-reuploaded

.. image:: https://img.shields.io/pypi/pyversions/flask-reuploaded
   :alt: PyPI - Python Version
   :target: https://pypi.org/project/Flask-Reuploaded/

.. image:: https://img.shields.io/pypi/l/hibpcli
   :target: https://github.com/jugmac00/flask-reuploaded/blob/master/LICENSE

Flask-Reuploaded
================

Flask-Reuploaded provides file uploads for Flask.


Notes on this package
---------------------

This is an independently maintained version of `Flask-Uploads` based on the
0.2.1 version of the original, but it also includes four years of changes
that were never released to PyPI. Most notably, it contains the fix for the
`Werkzeug` API change.


Goals
-----

- provide a stable drop-in replacement for `Flask-Uploads`
- regain momentum for this widely used package
- provide working PyPI packages


Migration guide from `Flask-Uploads`
------------------------------------

Incompatibilities between Flask-Reuploaded and Flask-Uploads
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

As already mentioned, staying compatible with `Flask-Uploads` is one of this
project's goals. Nevertheless, there are the following known
incompatibilities:

- the `patch_request_class` helper function has been removed; the function
  was only necessary for Flask 0.6 and earlier. Since then you can use
  Flask's own
  `MAX_CONTENT_LENGTH <https://flask.palletsprojects.com/en/1.1.x/config/#MAX_CONTENT_LENGTH>`_
  configuration value, so that no more than this many bytes are read from
  the incoming request data.
- `autoserve` of uploaded images has now been deactivated; this was a poorly
  documented "feature" which could even have led to unwanted data
  disclosure; if you want to activate the feature again, you need to set
  `UPLOADS_AUTOSERVE=True` (see the configuration sketch at the end of this
  document)

Uninstall and install
~~~~~~~~~~~~~~~~~~~~~

If you have used `Flask-Uploads` and want to migrate to `Flask-Reuploaded`,
you only have to install `Flask-Reuploaded` instead of `Flask-Uploads`.

That's all!

So, if you use `pip` to install your packages, instead of ...

.. code-block:: bash

    $ pip install Flask-Uploads  # don't do this! package is broken

... just do ...

.. code-block:: bash

    $ pip install Flask-Reuploaded

`Flask-Reuploaded` is a drop-in replacement. This means you do not have to
change a single line of code.


Installation
------------

.. code-block:: bash

    $ pip install Flask-Reuploaded


Getting started
---------------

create an UploadSet

.. code-block:: python

    from flask_uploads import IMAGES

    photos = UploadSet("photos", IMAGES)

configure your Flask app and this extension

.. code-block:: python

    app.config["UPLOADED_PHOTOS_DEST"] = "static/img"
    app.config["SECRET_KEY"] = os.urandom(24)
    configure_uploads(app, photos)

use `photos` in your view function

.. code-block:: python

    photos.save(request.files['photo'])

See below for a complete example.


Documentation
-------------

You can find the documentation at:

https://flask-reuploaded.readthedocs.io/en/latest/

You can generate the documentation locally:

.. code-block:: bash

    tox -e docs

You can update the dependencies for documentation generation:

.. code-block:: bash

    tox -e upgradedocs


Minimal example application
---------------------------

Application code, e.g. main.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: python

    import os

    from flask import Flask, flash, render_template, request
    # please note the import from `flask_uploads` - not `flask_reuploaded`!!
    # this is done on purpose to stay compatible with `Flask-Uploads`
    from flask_uploads import IMAGES, UploadSet, configure_uploads

    app = Flask(__name__)
    photos = UploadSet("photos", IMAGES)
    app.config["UPLOADED_PHOTOS_DEST"] = "static/img"
    app.config["SECRET_KEY"] = os.urandom(24)
    configure_uploads(app, photos)


    @app.route("/", methods=['GET', 'POST'])
    def upload():
        if request.method == 'POST' and 'photo' in request.files:
            photos.save(request.files['photo'])
            flash("Photo saved successfully.")
            return render_template('upload.html')
        return render_template('upload.html')

HTML code for `upload.html`
~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. code-block:: html

    <!doctype html>
    <html lang=en>
    <head>
        <meta charset=utf-8>
        <title>Flask-Reuploaded Example</title>
    </head>
    <body>
    {% with messages = get_flashed_messages() %}
      {% if messages %}
        <ul class=flashes>
        {% for message in messages %}
          <li>{{ message }}</li>
        {% endfor %}
        </ul>
      {% endif %}
    {% endwith %}
    <form method=POST enctype=multipart/form-data action="{{ url_for('upload') }}">
        <input type=file name=photo>
        <button type="submit">Submit</button>
    </form>
    </body>
    </html>

Project structure
~~~~~~~~~~~~~~~~~

The project structure looks as follows:

.. code-block:: bash

    ❯ tree -I "__*|h*"
    .
    ├── main.py
    ├── static
    │   └── img
    └── templates
        └── upload.html

Running the example application
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

In order to run the application, you have to enter the following commands:

.. code-block:: bash

    ❯ export FLASK_APP=main.py

    ❯ flask run

Then point your browser to `http://127.0.0.1:5000/`.


Contributing
------------

Contributions are more than welcome.

Please have a look at the `open issues <https://github.com/jugmac00/flask-reuploaded/issues>`_.

There is also a `short contributing guide <https://github.com/jugmac00/flask-reuploaded/blob/master/CONTRIBUTING.rst>`_.
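Configuration sketch
--------------------

The two configuration values discussed in the migration guide above can be
set next to the extension's own settings. This is a minimal sketch only; the
16 MiB limit is an arbitrary example value, not a recommendation:

.. code-block:: python

    import os

    from flask import Flask
    from flask_uploads import IMAGES, UploadSet, configure_uploads

    app = Flask(__name__)
    app.config["UPLOADED_PHOTOS_DEST"] = "static/img"
    app.config["SECRET_KEY"] = os.urandom(24)
    # Flask's own request size limit replaces the removed patch_request_class helper.
    app.config["MAX_CONTENT_LENGTH"] = 16 * 1024 * 1024  # example value: 16 MiB
    # Autoserving of uploads is off by default in Flask-Reuploaded; opt back in explicitly.
    app.config["UPLOADS_AUTOSERVE"] = True

    photos = UploadSet("photos", IMAGES)
    configure_uploads(app, photos)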
PypiClean
/Finance-Ultron-1.0.8.1.tar.gz/Finance-Ultron-1.0.8.1/ultron/factor/data/neutralize.py
import numpy as np import numba as nb from typing import Tuple from typing import Union from typing import Dict from . utilities import groupby def neutralize(x: np.ndarray, y: np.ndarray, groups: np.ndarray=None, detail: bool=False, weights: np.ndarray = None) \ -> Union[np.ndarray, Tuple[np.ndarray, Dict]]: if y.ndim == 1: y = y.reshape((-1, 1)) if weights is None: weights = np.ones(len(y), dtype=float) output_dict = {} if detail: exposure = np.zeros(x.shape + (y.shape[1],)) explained = np.zeros(x.shape + (y.shape[1],)) output_dict['exposure'] = exposure output_dict['explained'] = explained if groups is not None: res = np.zeros(y.shape) index_diff, order = groupby(groups) start = 0 if detail: for diff_loc in index_diff: curr_idx = order[start:diff_loc + 1] curr_x, b = _sub_step(x, y, weights, curr_idx, res) exposure[curr_idx, :, :] = b explained[curr_idx] = ls_explain(curr_x, b) start = diff_loc + 1 else: for diff_loc in index_diff: curr_idx = order[start:diff_loc + 1] _sub_step(x, y, weights, curr_idx, res) start = diff_loc + 1 else: try: b = ls_fit(x, y, weights) except np.linalg.linalg.LinAlgError: b = ls_fit_pinv(x, y, weights) res = ls_res(x, y, b) if detail: explained[:, :, :] = ls_explain(x, b) exposure[:] = b if output_dict: return res, output_dict else: return res def _sub_step(x, y, w, curr_idx, res) -> Tuple[np.ndarray, np.ndarray]: curr_x, curr_y, curr_w = x[curr_idx], y[curr_idx], w[curr_idx] try: b = ls_fit(curr_x, curr_y, curr_w) except np.linalg.linalg.LinAlgError: b = ls_fit_pinv(curr_x, curr_y, curr_w) res[curr_idx] = ls_res(curr_x, curr_y, b) return curr_x, b @nb.njit(nogil=True, cache=True) def ls_fit(x: np.ndarray, y: np.ndarray, w: np.ndarray) -> np.ndarray: x_bar = x.T * w b = np.linalg.solve(x_bar @ x, x_bar @ y) return b @nb.njit(nogil=True, cache=True) def ls_fit_pinv(x: np.ndarray, y: np.ndarray, w: np.ndarray) -> np.ndarray: x_bar = x.T * w b = np.linalg.pinv(x_bar @ x) @ x_bar @ y return b @nb.njit(nogil=True, cache=True) def ls_res(x: np.ndarray, y: np.ndarray, b: np.ndarray) -> np.ndarray: return y - x @ b @nb.njit(nogil=True, cache=True) def ls_explain(x: np.ndarray, b: np.ndarray) -> np.ndarray: m, n = b.shape return b.reshape((1, m, n)) * x.reshape((-1, m, 1))
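# ---------------------------------------------------------------------------
# Editor's note: a hypothetical usage sketch for `neutralize`, not part of the
# original module. The import path is inferred from the package layout
# (ultron/factor/data/neutralize.py); the data below is random and only
# illustrates the expected shapes.
# ---------------------------------------------------------------------------
import numpy as np

from ultron.factor.data.neutralize import neutralize

rng = np.random.RandomState(0)
risk_exposure = rng.randn(500, 3)      # 500 assets, 3 risk-factor exposures
factor_values = rng.randn(500, 2)      # two raw factor columns to be neutralized
industry = rng.randint(0, 10, 500)     # group labels, e.g. industry codes

# Plain residuals: regress the factor columns on the exposures within each group.
res = neutralize(risk_exposure, factor_values, groups=industry)
print(res.shape)                       # (500, 2)

# With detail=True a dict with per-observation coefficients is returned as well.
res, info = neutralize(risk_exposure, factor_values, groups=industry, detail=True)
print(info['exposure'].shape)          # (500, 3, 2)
print(info['explained'].shape)         # (500, 3, 2)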
PypiClean
/ImmuneDB-0.29.11.tar.gz/ImmuneDB-0.29.11/immunedb/common/mutations.py
import json from sqlalchemy import distinct from immunedb.common.models import Sequence, SequenceCollapse import immunedb.util.lookups as lookups import immunedb.util.funcs as funcs class ContextualMutations(object): """Calculates the mutations of a set of sequences within a given context. :param list regions: The gene region positions relative to the input sequence gapping. """ def __init__(self, regions): self._seen = {} self._regions = regions self._pos_seen = set([]) self.region_muts = {} self.position_muts = {} def add_mutation(self, seq, cdr3_num_nts, mutation, from_aa, intermediate_seq_aa, final_seq_aa, copy_number): """Adds a mutation to the the aggregate mutation list. :param str seq: The sequence with the mutation :param int cdr3_num_nts: The number of bases in the CDR3 :param tuple mutation: The mutation in (position, from_nt, to_nt, type) form. :param char from_aa: The germline amino acid :param char intermediate_seq_aa: The amino acid in the sequence if only the point mutation occurred :param char final_seq_aa: The final mutated amino acid :param int copy_number: The number of times this sequence appeared """ pos, _, _, mtype = mutation region = funcs.get_pos_region(self._regions, cdr3_num_nts, pos) self._add_to_region(seq, cdr3_num_nts, mutation, from_aa, intermediate_seq_aa, final_seq_aa, copy_number, region) self._add_to_region(seq, cdr3_num_nts, mutation, from_aa, intermediate_seq_aa, final_seq_aa, copy_number, 'ALL') if pos not in self.position_muts: self.position_muts[pos] = {} if mtype not in self.position_muts[pos]: self.position_muts[pos][mtype] = 0 self.position_muts[pos][mtype] += 1 def _add_to_region(self, seq, cdr3_num_nts, mutation, from_aa, intermediate_seq_aa, final_seq_aa, copy_number, region): pos, from_nt, to_nt, mtype = mutation if region not in self.region_muts: self.region_muts[region] = {} # If it's a new mutation, setup the dictionaries if mtype not in self.region_muts[region]: self.region_muts[region][mtype] = {} if mutation not in self.region_muts[region][mtype]: self.region_muts[region][mtype][mutation] = { 'pos': pos, 'from_nt': from_nt, 'from_aa': from_aa, 'to_nt': to_nt, 'to_aas': [], 'unique': 0, 'total': 0, 'intermediate_aa': intermediate_seq_aa, } mut_dict = self.region_muts[region][mtype][mutation] if final_seq_aa not in mut_dict['to_aas']: mut_dict['to_aas'].append(final_seq_aa) mut_dict['unique'] += 1 mut_dict['total'] += copy_number def get_all(self): # Strip the dictionary keys and just make a list of mutations final_regions = {} for region, types in self.region_muts.items(): final_regions[region] = {} for mtype, mutations in types.items(): final_regions[region][mtype] = list(mutations.values()) return { 'regions': final_regions, 'positions': self.position_muts } class CloneMutations(object): def __init__(self, session, clone): self._clone = clone self._session = session self._germline = self._clone.consensus_germline def _get_codon_at(self, seq, i): aa_off = i - i % 3 return seq[aa_off:aa_off + 3] def _get_aa_at(self, seq, i): return lookups.aa_from_codon(self._get_codon_at(seq, i)) def _get_mutation(self, seq, i): if (self._germline[i] != seq[i] and self._germline[i] not in ('N', '-') and seq[i] not in ('N', '-')): grm_aa = self._get_aa_at(self._germline, i) # Simulate this mutation alone off = i % 3 grm_codon = self._get_codon_at(self._germline, i) seq_aa = lookups.aa_from_codon( grm_codon[:off] + seq[i] + grm_codon[off+1:]) if grm_aa is None or seq_aa is None: return 'unknown', seq_aa elif grm_aa != seq_aa: if 
lookups.are_conserved_aas(grm_aa, seq_aa): return 'conservative', seq_aa return 'nonconservative', seq_aa else: return 'synonymous', seq_aa return None, None def calculate(self, commit_seqs=False, limit_samples=None): sample_mutations = {} if limit_samples is not None: sample_ids = limit_samples else: sample_ids = [r.sample_id for r in self._session.query( distinct(Sequence.sample_id).label('sample_id') ).filter( Sequence.clone == self._clone )] sample_ids.append(None) for sample_id in sample_ids: seqs = self._session.query(Sequence).filter( Sequence.clone == self._clone) if sample_id is None: seqs = seqs.join(SequenceCollapse).filter( SequenceCollapse.copy_number_in_subject > 0 ) else: seqs = seqs.filter( Sequence.sample_id == sample_id ) sample_mutations[sample_id] = self._get_contextual_mutations( seqs, commit_seqs, use_sample_copy=sample_id is not None) return sample_mutations def _get_contextual_mutations(self, seqs, commit_seqs, use_sample_copy): context_mutations = ContextualMutations(self._clone.regions) for seq in seqs: seq_mutations = {} for i in range(0, len(seq.clone_sequence)): mtype, intermediate_aa = self._get_mutation( seq.clone_sequence, i) if mtype is None: continue from_aa = self._get_aa_at(self._germline, i) seq_mutations[i] = mtype mutation = (i, self._germline[i], seq.clone_sequence[i], mtype) copy_field = ( seq.copy_number if use_sample_copy else seq.collapse.copy_number_in_subject ) context_mutations.add_mutation( seq.clone_sequence, self._clone.cdr3_num_nts, mutation, from_aa, intermediate_aa, self._get_aa_at(seq.clone_sequence, i), copy_field) if commit_seqs: seq.mutations_from_clone = json.dumps(seq_mutations) return context_mutations def threshold_mutations(all_muts, min_required_seqs): """Removes mutations that occur in less than ``min_required_seqs`` :param dict all_muts: A mutation dictionary as generated by ContextualMutations :param int min_required_seqs: The minimum number of sequences in which a mutation must occur to be in the thresholded mutations :returns dict: A new mutation dictionary with infrequent mutations removed """ final = {} for region, types in all_muts['regions'].items(): final[region] = { 'counts': { 'total': {}, 'unique': {} }, 'mutations': {} } for mtype, mutations in sorted(types.items()): for mutation in sorted( mutations, key=lambda m: (m['pos'], m['from_nt'], m['to_nt'])): if mutation['unique'] >= min_required_seqs: if mtype not in final[region]['mutations']: final[region]['mutations'][mtype] = [] if mtype not in final[region]['counts']['total']: final[region]['counts']['total'][mtype] = 0 final[region]['counts']['unique'][mtype] = 0 final[region]['mutations'][mtype].append(mutation) final[region]['counts']['total'][mtype] += \ mutation['unique'] final[region]['counts']['unique'][mtype] += 1 return final
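# ---------------------------------------------------------------------------
# Editor's note: a hypothetical usage sketch for `threshold_mutations`, not
# part of the original module. The region name and nucleotide values are made
# up; the dictionary layout follows what ContextualMutations.get_all emits.
# ---------------------------------------------------------------------------
from immunedb.common.mutations import threshold_mutations

example_muts = {
    'regions': {
        'CDR1': {
            'synonymous': [
                {'pos': 12, 'from_nt': 'A', 'to_nt': 'G', 'unique': 5, 'total': 9},
                {'pos': 30, 'from_nt': 'C', 'to_nt': 'T', 'unique': 1, 'total': 1},
            ],
        },
    },
    'positions': {},
}

kept = threshold_mutations(example_muts, min_required_seqs=2)
# Only the first mutation occurs in at least two unique sequences.
assert len(kept['CDR1']['mutations']['synonymous']) == 1
assert kept['CDR1']['counts']['unique']['synonymous'] == 1
assert kept['CDR1']['counts']['total']['synonymous'] == 5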
PypiClean
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/localflavor/in_/in_states.py
STATE_CHOICES = ( ('KA', 'Karnataka'), ('AP', 'Andhra Pradesh'), ('KL', 'Kerala'), ('TN', 'Tamil Nadu'), ('MH', 'Maharashtra'), ('UP', 'Uttar Pradesh'), ('GA', 'Goa'), ('GJ', 'Gujarat'), ('RJ', 'Rajasthan'), ('HP', 'Himachal Pradesh'), ('JK', 'Jammu and Kashmir'), ('AR', 'Arunachal Pradesh'), ('AS', 'Assam'), ('BR', 'Bihar'), ('CG', 'Chattisgarh'), ('HR', 'Haryana'), ('JH', 'Jharkhand'), ('MP', 'Madhya Pradesh'), ('MN', 'Manipur'), ('ML', 'Meghalaya'), ('MZ', 'Mizoram'), ('NL', 'Nagaland'), ('OR', 'Orissa'), ('PB', 'Punjab'), ('SK', 'Sikkim'), ('TR', 'Tripura'), ('UA', 'Uttarakhand'), ('WB', 'West Bengal'), # Union Territories ('AN', 'Andaman and Nicobar'), ('CH', 'Chandigarh'), ('DN', 'Dadra and Nagar Haveli'), ('DD', 'Daman and Diu'), ('DL', 'Delhi'), ('LD', 'Lakshadweep'), ('PY', 'Pondicherry'), ) STATES_NORMALIZED = { 'an': 'AN', 'andaman and nicobar': 'AN', 'andra pradesh': 'AP', 'andrapradesh': 'AP', 'andhrapradesh': 'AP', 'ap': 'AP', 'andhra pradesh': 'AP', 'ar': 'AR', 'arunachal pradesh': 'AR', 'assam': 'AS', 'as': 'AS', 'bihar': 'BR', 'br': 'BR', 'cg': 'CG', 'chattisgarh': 'CG', 'ch': 'CH', 'chandigarh': 'CH', 'daman and diu': 'DD', 'dd': 'DD', 'dl': 'DL', 'delhi': 'DL', 'dn': 'DN', 'dadra and nagar haveli': 'DN', 'ga': 'GA', 'goa': 'GA', 'gj': 'GJ', 'gujarat': 'GJ', 'himachal pradesh': 'HP', 'hp': 'HP', 'hr': 'HR', 'haryana': 'HR', 'jharkhand': 'JH', 'jh': 'JH', 'jammu and kashmir': 'JK', 'jk': 'JK', 'karnataka': 'KA', 'karnatka': 'KA', 'ka': 'KA', 'kerala': 'KL', 'kl': 'KL', 'ld': 'LD', 'lakshadweep': 'LD', 'maharastra': 'MH', 'mh': 'MH', 'maharashtra': 'MH', 'meghalaya': 'ML', 'ml': 'ML', 'mn': 'MN', 'manipur': 'MN', 'madhya pradesh': 'MP', 'mp': 'MP', 'mizoram': 'MZ', 'mizo': 'MZ', 'mz': 'MZ', 'nl': 'NL', 'nagaland': 'NL', 'orissa': 'OR', 'odisa': 'OR', 'orisa': 'OR', 'or': 'OR', 'pb': 'PB', 'punjab': 'PB', 'py': 'PY', 'pondicherry': 'PY', 'rajasthan': 'RJ', 'rajastan': 'RJ', 'rj': 'RJ', 'sikkim': 'SK', 'sk': 'SK', 'tamil nadu': 'TN', 'tn': 'TN', 'tamilnadu': 'TN', 'tamilnad': 'TN', 'tr': 'TR', 'tripura': 'TR', 'ua': 'UA', 'uttarakhand': 'UA', 'up': 'UP', 'uttar pradesh': 'UP', 'westbengal': 'WB', 'bengal': 'WB', 'wb': 'WB', 'west bengal': 'WB' }
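# ---------------------------------------------------------------------------
# Editor's note: a hypothetical usage sketch, not part of the original module.
# STATES_NORMALIZED maps common spellings and abbreviations (lower-cased) to
# the two-letter codes used in STATE_CHOICES; `normalize_state` below is an
# illustrative helper, not an API provided by django.contrib.localflavor.
# ---------------------------------------------------------------------------
from django.contrib.localflavor.in_.in_states import STATES_NORMALIZED


def normalize_state(value):
    """Return the two-letter code for a free-text Indian state name, or None."""
    return STATES_NORMALIZED.get(value.strip().lower())


assert normalize_state("  Tamilnadu ") == "TN"
assert normalize_state("Uttar Pradesh") == "UP"
assert normalize_state("Atlantis") is None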
PypiClean
/Nitrous-0.9.3-py3-none-any.whl/turbogears/identity/visitor.py
from builtins import str from builtins import map from builtins import object __all__ = ['IdentityVisitPlugin', 'create_extension_model', 'shutdown_extension', 'start_extension'] import base64 import logging from cherrypy import request from formencode.variabledecode import variable_decode, variable_encode from turbogears import config, visit from turbogears.identity import (create_default_provider, set_current_identity, set_current_provider, set_login_attempted) from turbogears.identity.exceptions import (IdentityConfigurationException, IdentityException, IdentityFailure) log = logging.getLogger('turbogears.identity') # Global Visit plugin _plugin = None # Interface for the TurboGears extension def start_extension(): """Register the IdentityVisitPlugin with the visit plugin framework. Also sets up the configured Identity provider. """ global _plugin # Bail out if the application hasn't enabled this extension if not config.get('identity.on', False): return # Identity requires that Visit tracking be enabled if not config.get('visit.on', False): raise IdentityConfigurationException( "Visit tracking must be enabled (via visit.on).") # Bail out if Visit tracking plugin is already registered if _plugin: log.info("Identity already started") return log.info("Identity starting") # Temporary until tg-admin can call create_extension_model create_extension_model() # Create and register the plugin for the Visit Tracking framework _plugin = IdentityVisitPlugin() visit.enable_visit_plugin(_plugin) def shutdown_extension(): """Stops the IdentityVisitPlugin.""" global _plugin # Bail out if the application hasn't enabled this extension if not config.get('identity.on', False): return # Bail out if the Visit tracking plugin is already unregistered if not _plugin: log.info("Identity already shut down") return # Unregister the plugin for the Visit Tracking framework visit.disable_visit_plugin(_plugin) _plugin = None log.info("Identity has been shut down.") def create_extension_model(): """Create the identity provider object.""" provider = create_default_provider() provider.create_provider_model() class IdentityVisitPlugin(object): """Visit plugin tying the Identity framework to the visit management.""" def __init__(self): log.info("Identity visit plugin initialized") get = config.get self.provider = create_default_provider() # When retrieving identity information from the form, use the following # form field names. These fields will be removed from the post data to # prevent the controller from receiving unexpected fields. self.user_name_field = get('identity.form.user_name', 'user_name') self.password_field = get('identity.form.password', 'password') self.submit_button_name = get('identity.form.submit', 'login') # Sources for identity information and the order in which they should be # checked. These terms are mapped to methods by prepending # "identity_from_". 
sources = [_f for _f in map(str.strip, get('identity.source', 'form,http_auth,visit').split(',')) if _f] if not sources: raise IdentityConfigurationException( "You must set some identity source (via identity.source).") if 'http_auth' in sources and not get('identity.http_basic_auth'): sources.remove('http_auth') if 'visit' in sources and not get('visit.on'): sources.remove('visit') if not sources: raise IdentityConfigurationException( "You must activate at least one of the identity sources.") self.identity_sources = list() for s in sources: if s: try: source_method = getattr(self, 'identity_from_' + s) except AttributeError: raise IdentityConfigurationException("Invalid " "identity source: %s (check identity.source)" % s) self.identity_sources.append(source_method) def identity_from_request(self, visit_key): """Retrieve identity information from the HTTP request. Checks first for form fields defining the identity then for a cookie. If no identity is found, returns an anonymous identity. """ identity = None log.debug("Retrieving identity for visit: %s", visit_key) for source in self.identity_sources: identity = source(visit_key) if identity: return identity log.debug("No identity found") # No source reported an identity identity = self.provider.anonymous_identity() return identity def decode_basic_credentials(self, credentials): """Decode base64 user_name:password credentials used in Basic Auth. Returns a list with username in element 0 and password in element 1. """ credentials = base64.decodestring(credentials.strip()) try: credentials = credentials.decode('utf-8') except UnicodeError: try: credentials = credentials.decode('latin-1') except UnicodeError: credentials = '' credentials = credentials.split(':', 1) if len(credentials) < 2: credentials.append('') return credentials def identity_from_http_auth(self, visit_key): """Try to get authentication data from Authorization request header. Only HTTP basic auth is handled at the moment. """ try: authorisation = request.headers['Authorization'] except KeyError: return None authScheme, schemeData = authorisation.split(' ', 1) # Only basic is handled at the moment if authScheme.lower() != 'basic': log.error("HTTP Auth is not basic") return None # decode credentials user_name, password = self.decode_basic_credentials(schemeData) set_login_attempted(True) return self.provider.validate_identity(user_name, password, visit_key) def identity_from_visit(self, visit_key): """Load identity from Identity provider.""" return self.provider.load_identity(visit_key) def identity_from_form(self, visit_key): """Inspect the request params to pull out identity information. Must have fields for user name, password, and a login submit button. Returns an identity object whose class depends on the current identity provider or None if the form contained no identity information or the information was incorrect. 
""" # only try to process credentials for login forms params = request.params if params.pop(self.submit_button_name, None) is None: return None # form data contains login credentials params.pop(self.submit_button_name + '.x', None) params.pop(self.submit_button_name + '.y', None) user_name = params.pop(self.user_name_field, None) password = params.pop(self.password_field, None) if user_name is None: log.error("Missing user name in login form") return None elif isinstance(user_name, list): log.error("Multiple user names in login form") return None if password is None: log.error("Missing password in login form") return None elif isinstance(password, list): log.error("Multiple passwords in login form") return None set_login_attempted(True) identity = self.provider.validate_identity( user_name, password, visit_key) if identity is None: log.warning("The credentials specified weren't valid") return None return identity def record_request(self, visit): """Authenticate request and try to associate the visit with an identity.""" # This method is called by the visit plugin mechanism on each request with a visit key. # default to keeping the identity filter off if not config.get('identity.on', False): log.debug("Identity is not enabled. Setting current identity to None") set_current_identity(None) return if 'identity.path_info' in request.wsgi_environ: # restore path_info and params after internal redirect request.path_info = request.wsgi_environ.pop('identity.path_info') request.params = request.wsgi_environ.pop('identity.params', {}) try: identity = self.identity_from_request(visit.key) except IdentityException as e: log.exception("Caught exception while getting identity from request") errors = [str(e)] raise IdentityFailure(errors) log.debug("Identity is available...") # stash the user in the thread data for this request set_current_identity(identity) set_current_provider(self.provider)
PypiClean
/EZID-0.3.tar.gz/EZID-0.3/DSC_EZID_minter.py
# Digital Special Collection minter for new arks from ezid # Set the default metadata profile to dc (_profile dc) # Save the new ark in a text file for later reference # Currently, EZID does not provide a list of your arks # You must save the ark somewhere for recall import os, sys import datetime from EZID import EZIDClient from config_reader import read_config import plac EZID_index = os.path.join(os.environ['HOME'], 'indexes/EZID.txt') def save_new_id(ark): '''Save the ark to the index, just a text file for now ''' with open(EZID_index, 'a+') as f: f.write(ark.strip()+','+datetime.datetime.now().strftime('%Y-%m-%d %H:%m:%S')+'\n') @plac.annotations( number=("Number of new ARKs to mint", 'positional', None, int), shoulder=("EZID shoulder to mint from", 'option', None, str), verbose=("Print each id when retrieved", 'flag', 'v') ) def main(number, shoulder=None, username=None, password=None, verbose=False, metadata=None): if not shoulder or not username or not password: HOME = os.environ['HOME'] config_file= os.environ.get('DATABASES_XML_EZID', HOME + '/.databases.xml') dbs = read_config(config_file=config_file) USERNAME = dbs['EZID']['USER'] PASSWORD = dbs['EZID']['PASSWORD'] SHOULDER = dbs['EZID']['SHOULDER'] shoulder = shoulder if shoulder else SHOULDER username = username if username else USERNAME password = password if password else PASSWORD ezid = EZIDClient(credentials=dict(username=username, password=password)) new_ids = [] for x in range(0, number): if not metadata: metadata = {'_profile':'dc',} ez = ezid.mint(shoulder=shoulder, data=metadata) save_new_id(ez) new_ids.append(ez) metadata['_target'] = 'http://content.cdlib.org/'+ez resp = ezid.update(ez, metadata) print resp if verbose: print ez return new_ids if __name__=='__main__': new_ids = plac.call(main) for ez in new_ids: print ez
PypiClean
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/transcript_detection.py
import os import sys import numpy as np from annogesiclib.lib_reader import read_wig, read_libs from annogesiclib.coverage_detection import get_repmatch def check_tex_conds(tracks, libs, texs, check_tex, conds, tex_notex): for track in tracks: for lib in libs: if lib["name"] == track: if "tex" in lib["type"]: type_ = "tex" else: type_ = "frag" index = "_".join([lib["cond"]]) if len(texs) != 0: for key, num in texs.items(): if track in key: if (texs[key] >= tex_notex) and ( key not in check_tex): check_tex.append(key) if index not in conds.keys(): conds[index] = 1 else: conds[index] += 1 else: if index not in conds.keys(): conds[index] = 1 else: conds[index] += 1 def detect_hight_toler(cover, height, tmp_covers, tracks, lib_track): if cover > height: if tmp_covers["best"] < cover: tmp_covers["best"] = cover tracks.append(lib_track) else: if cover > tmp_covers["toler"]: tmp_covers["toler"] = cover def elongation(lib_conds, template_texs, libs, strand, trans, args_tran, strain, tolers): '''check coverage and replicate match to form transcript''' first = True pre_pos = -1 check_tex = [] tracks = [] conds = {} pre_wig = None detect = False texs = template_texs.copy() tmp_covers = {"best": 0, "toler": -1} for cond, lib_tracks in lib_conds.items(): for lib_name, covers in lib_tracks.items(): index_pos = 0 for cover in covers: for cond, lib_tracks in lib_conds.items(): for lib_track in lib_tracks.keys(): real_track = lib_track.split("|")[-3] if index_pos < len(lib_tracks[lib_track]): compare_cover = lib_tracks[lib_track][index_pos] else: compare_cover = 0 detect_hight_toler( compare_cover, args_tran.height, tmp_covers, tracks, real_track) for track in tracks: if len(texs) != 0: for key, num in texs.items(): if track in key: texs[key] += 1 check_tex_conds(tracks, libs, texs, check_tex, conds, args_tran.tex) for cond, detect_num in conds.items(): if ("tex" in cond): tex_rep = get_repmatch(args_tran.replicates["tex"], cond) if detect_num >= tex_rep: detect = True elif ("frag" in cond): frag_rep = get_repmatch(args_tran.replicates["frag"], cond) if detect_num >= frag_rep: detect = True if detect: detect = False trans[strain].append(tmp_covers["best"]) else: trans[strain].append(-1) if (tmp_covers["toler"] != -1): tolers.append(tmp_covers["toler"]) else: tolers.append(args_tran.height + 10) tmp_covers = {"best": 0, "toler": -1} tracks = [] conds = {} check_tex = [] texs = template_texs.copy() index_pos += 1 break break def transfer_to_tran(wigs, libs, template_texs, strand, args_tran): '''check coverage and replicate match to form transcript''' tolers = {} trans = {} detect = False for strain, lib_conds in wigs.items(): if strain not in trans: trans[strain] = [] tolers[strain] = [] elongation(lib_conds, template_texs, libs, strand, trans, args_tran, strain, tolers[strain]) return tolers, trans def print_transcript(finals, out): for strain, datas in finals.items(): num = 0 datas = sorted(datas, key=lambda x: ( x["start"], x["end"], x["strand"])) for data in datas: name = '%0*d' % (5, num) attribute = ";".join(["=".join(items) for items in ([ ("ID", strain + "_transcript" + str(num)), ("Name", "transcript_" + name), ("high_coverage", str(data["high"])), ("low_coverage", str(data["low"])), ("detect_lib", data["wig"])])]) out.write("\t".join([str(field) for field in [ strain, "ANNOgesic", "transcript", str(data["start"]), str(data["end"]), ".", data["strand"], ".", attribute]]) + "\n") num += 1 def fill_gap_and_print(trans, strand, finals, tolers, wig_type, args_tran): '''compare transcript with CDS to 
modify transcript(merge mutliple transcript based on overlap with the same CDS)''' for strain, covers in trans.items(): if strain not in finals: finals[strain] = [] first = True start = -1 end = -1 pre_cover = None cover_pos = 1 for cover in covers: fit = True if cover != -1: if first: first = False start = cover_pos high_cover = cover low_cover = cover else: if (cover_pos - pre_pos) <= args_tran.tolerance: if cover_pos - pre_pos > 1: for toler_strain, toler_datas in tolers.items(): if toler_strain == strain: toler_covers = toler_datas[ (pre_pos - 1): cover_pos] for toler_cover in toler_covers: if (toler_cover < args_tran.low_cutoff): fit = False break if fit: end = cover_pos if high_cover < cover: high_cover = cover if low_cover > cover: low_cover = cover if ((cover_pos - pre_pos) > args_tran.tolerance) or (not fit): if (start != -1) and (end != -1) and ( (end - start) >= args_tran.width): finals[strain].append({ "start": start, "end": end, "strand": strand, "high": high_cover, "low": low_cover, "wig": wig_type}) start = cover_pos end = -1 high_cover = cover low_cover = cover pre_cover = cover pre_pos = cover_pos cover_pos += 1 if (len(covers) != 0) and (not first) and ( (start != -1) and (end != -1) and ( (end - start) >= args_tran.width)): finals[strain].append({ "start": start, "end": end, "strand": strand, "high": high_cover, "low": low_cover, "wig": wig_type}) return finals def detect_transcript(wig_f_file, wig_r_file, wig_folder, input_lib, out_file, wig_type, args_tran): out = open(out_file, "w") out.write("##gff-version 3\n") finals = {} libs, texs = read_libs(input_lib, wig_folder) wig_fs = read_wig(wig_f_file, "+", libs) wig_rs = read_wig(wig_r_file, "-", libs) tolers_f, tran_fs = transfer_to_tran(wig_fs, libs, texs, "+", args_tran) tolers_r, tran_rs = transfer_to_tran(wig_rs, libs, texs, "-", args_tran) fill_gap_and_print(tran_fs, "+", finals, tolers_f, wig_type, args_tran) fill_gap_and_print(tran_rs, "-", finals, tolers_r, wig_type, args_tran) print_transcript(finals, out) out.close() del wig_fs del wig_rs
PypiClean
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/imageFonts.js
(function(b,c,a){var d="2.7.9";b.Register.LoadHook(c.fontDir+"/fontdata.js",function(){c.Augment({allowWebFonts:false,imgDir:c.webfontDir+"/png",imgPacked:(MathJax.isPacked?"":"/unpacked"),imgSize:["050","060","071","085",100,120,141,168,200,238,283,336,400,476],imgBaseIndex:4,imgSizeForEm:{},imgSizeForScale:{},imgZoom:1,handleImg:function(t,i,r,h,u){if(u.length){this.addText(t,u)}var s=r[5].orig;if(!s){s=r[5].orig=[r[0],r[1],r[2],r[3],r[4]]}var m=this.imgZoom;if(!t.scale){t.scale=1}var p=this.imgIndex(t.scale*m);if(p==this.imgEmWidth.length-1&&this.em*t.scale*m/this.imgEmWidth[p]>1.1){m=this.imgEmWidth[p]/(this.em*t.scale)}var q=this.imgEmWidth[p]/(this.em*(t.scale||1)*m);r[0]=s[0]*q;r[1]=s[1]*q;r[2]=s[2]*q;r[3]=s[3]*q;r[4]=s[4]*q;var k=this.imgDir+"/"+i.directory+"/"+this.imgSize[p];var l=h.toString(16).toUpperCase();while(l.length<4){l="0"+l}var j=k+"/"+l+".png";var o=r[5].img[p];var g={width:Math.floor(o[0]/m+0.5)+"px",height:Math.floor(o[1]/m+0.5)+"px"};if(o[2]){g.verticalAlign=Math.floor(-o[2]/m+0.5)+"px"}if(r[3]<0){g.marginLeft=this.Em(r[3]/1000)}if(r[4]!=r[2]){g.marginRight=this.Em((r[2]-r[4])/1000)}if(this.msieIE6){g.filter="progid:DXImageTransform.Microsoft.AlphaImageLoader(src='"+a.urlRev(j)+"', sizingMethod='scale')";j=this.directory+"/blank.gif"}this.addElement(t,"img",{src:a.urlRev(j),style:g,isMathJax:true});return""},defineImageData:function(i){for(var g in i){if(i.hasOwnProperty(g)){var h=c.FONTDATA.FONTS[g];if(h){g=i[g];for(var j in g){if(g.hasOwnProperty(j)&&h[j]){h[j][5]={img:g[j]}}}}}}},initImg:function(j){if(this.imgSizeForEm[this.em]){this.imgBaseIndex=this.imgSizeForEm[this.em]}for(var h=0,g=this.imgEmWidth.length-1;h<g;h++){if(this.em<=this.imgEmWidth[h]){break}}if(h&&this.imgEmWidth[h]-this.em>this.em-this.imgEmWidth[h-1]){h--}this.imgSizeForEm[this.em]=this.imgBaseIndex=h;this.imgZoom=this.imgBrowserZoom()},imgIndex:function(k){if(!k){return this.imgBaseIndex}if(!this.imgSizeForScale[this.em]){this.imgSizeForScale[this.em]={}}if(this.imgSizeForScale[this.em][k]){return this.imgSizeForScale[this.em][k]}var j=this.em*k;for(var h=0,g=this.imgEmWidth.length-1;h<g;h++){if(j<=this.imgEmWidth[h]){break}}if(h&&this.imgEmWidth[h]-j>j-this.imgEmWidth[h-1]){h--}this.imgSizeForScale[this.em][k]=h;return h},imgBrowserZoom:function(){return 1}});b.Browser.Select({Firefox:function(h){var g=c.addElement(document.body,"div",{style:{display:"none",visibility:"hidden",overflow:"scroll",position:"absolute",top:0,left:0,width:"200px",height:"200px",padding:0,border:0,margin:0}});var i=c.addElement(g,"div",{style:{position:"absolute",left:0,top:0,right:0,bottom:0,padding:0,border:0,margin:0}});c.Augment({imgSpaceBug:true,imgSpace:"\u00A0",imgZoomLevel:(h.isMac?{50:0.3,30:0.5,22:0.67,19:0.8,16:0.9,15:1,13:1.1,12:1.2,11:1.33,10:1.5,9:1.7,7:2,6:2.4,5:3,0:15}:{56:0.3,34:0.5,25:0.67,21:0.8,19:0.9,17:1,15:1.1,14:1.2,13:1.33,11:1.5,10:1.7,8:2,7:2.4,6:3,0:17}),imgZoomDiv:g,imgBrowserZoom:function(){var j=this.imgZoomLevel;g.style.display="";var k=(g.offsetWidth-i.offsetWidth);k=(j[k]?j[k]:j[0]/k);g.style.display="none";return k}})},Safari:function(g){c.Augment({imgBrowserZoom:function(){return 3}})},Chrome:function(g){c.Augment({imgHeightBug:true,imgBrowserZoom:function(){return 3}})},Opera:function(g){c.Augment({imgSpaceBug:true,imgSpace:"\u00A0\u00A0",imgDoc:(document.compatMode=="BackCompat"?document.body:document.documentElement),imgBrowserZoom:function(){if(g.isMac){return 3}var 
h=this.imgDoc.clientHeight,i=Math.floor(15*h/window.innerHeight);if(this.imgDoc.clientWidth<this.imgDoc.scrollWidth-i){h+=i}return parseFloat((window.innerHeight/h).toFixed(1))}})}});var f=function(){var h=c.FONTDATA.FONTS.MathJax_Main[8212][5].img;c.imgEmWidth=[];for(var j=0,g=h.length;j<g;j++){c.imgEmWidth[j]=h[j][0]}};var e=c.imgDir+c.imgPacked;MathJax.Callback.Queue(["Require",a,e+"/imagedata.js"],f,["loadComplete",a,c.directory+"/imageFonts.js"])})})(MathJax.Hub,MathJax.OutputJax["HTML-CSS"],MathJax.Ajax);
PypiClean
/KeyCat-0.2.5.tar.gz/KeyCat-0.2.5/keycat/models.py
import numpy from key_codes import key_label_dictionary from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship from sqlalchemy import Column, Integer, String, ForeignKey, LargeBinary Base = declarative_base() class Button(Base): __tablename__ = 'button' id = Column(String, primary_key=True) templates = relationship("Template", back_populates="button", cascade="all, delete-orphan") shortcuts = relationship("Shortcut", back_populates="button", cascade="all, delete-orphan") program = Column(String) name = Column(String) def __init__(self, id, program, name, templates, shortcuts): self.id = id self.templates = templates self.shortcuts = shortcuts self.program = program self.name = name def __eq__(self, other): return self.id == other.id and self.program == other.program def __repr__(self): return self.id + " " + self.program + " " + str(self.shortcuts) + " " + str(self.templates) class Template(Base): __tablename__ = 'template' id = Column(Integer, primary_key=True) template_string = Column(LargeBinary) width = Column(Integer) height = Column(Integer) button_id = Column(Integer, ForeignKey('button.id')) button = relationship("Button", back_populates="templates") def __init__(self, template_string, height, width): self.template_string = template_string self.width = width self.height = height def get_template_as_numpy_array(self): return numpy.reshape(numpy.fromstring(self.template_string, dtype=numpy.uint8), (self.height, self.width)) def __eq__(self, other): return self.template_string == other.template_string and self.width == other.width \ and self.height == other.height def __repr__(self): return str(self.width) + " " + str(self.height) class Shortcut(Base): __tablename__ = 'shortcut' id = Column(Integer, primary_key=True) button_id = Column(Integer, ForeignKey('button.id')) button = relationship("Button", back_populates="shortcuts") keycodes = Column(String) def get_keycodes_in_readable_format(self): return "+".join(map(lambda x: key_label_dictionary[int(x)], self.keycodes.split(","))) def __init__(self, keycodes): self.keycodes = keycodes def __eq__(self, other): return self.keycodes == other.keycodes def __repr__(self): return self.keycodes class ShortcutStat(Base): __tablename__ = 'shortcut_stat' id = Column(Integer, primary_key=True) shortcut_id = Column(Integer, ForeignKey('shortcut.id')) shortcut = relationship("Shortcut") hit_count = Column(Integer) def __init__(self, shortcut, hit_count): self.shortcut = shortcut self.hit_count = hit_count def __eq__(self, other): return self.shortcut == other.shortcut and self.hit_count == other.hit_count class ButtonStat(Base): __tablename__ = 'button_stat' id = Column(Integer, primary_key=True) button_id = Column(Integer, ForeignKey('button.id')) button = relationship("Button") hit_count = Column(Integer) def __init__(self, button, hit_count): self.button = button self.hit_count = hit_count def __eq__(self, other): return self.button == other.button and self.hit_count == other.hit_count
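# ---------------------------------------------------------------------------
# Editor's note: a hypothetical usage sketch, not part of the original module.
# It wires the declarative models above to an in-memory SQLite database; the
# import path follows the package layout (keycat/models.py), and the example
# values ("copy_button", "gedit", the key codes, the 12-byte template) are
# made up for illustration.
# ---------------------------------------------------------------------------
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from keycat.models import Base, Button, Shortcut, Template

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

template = Template(template_string=b"\x00" * 12, height=3, width=4)
shortcut = Shortcut(keycodes="37,54")        # comma-separated key codes
button = Button("copy_button", "gedit", "Copy", [template], [shortcut])

session.add(button)
session.commit()

stored = session.query(Button).filter_by(program="gedit").one()
print(stored.templates[0].get_template_as_numpy_array().shape)   # (3, 4)
print(stored.shortcuts[0].keycodes)                              # 37,54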
PypiClean
/FastFlask-1.2.32-py3-none-any.whl/werkzeug/exceptions.py
import sys import typing as t import warnings from datetime import datetime from html import escape from ._internal import _get_environ if t.TYPE_CHECKING: import typing_extensions as te from _typeshed.wsgi import StartResponse from _typeshed.wsgi import WSGIEnvironment from .datastructures import WWWAuthenticate from .sansio.response import Response from .wrappers.request import Request as WSGIRequest # noqa: F401 from .wrappers.response import Response as WSGIResponse # noqa: F401 class HTTPException(Exception): """The base class for all HTTP exceptions. This exception can be called as a WSGI application to render a default error page or you can catch the subclasses of it independently and render nicer error messages. """ code: t.Optional[int] = None description: t.Optional[str] = None def __init__( self, description: t.Optional[str] = None, response: t.Optional["Response"] = None, ) -> None: super().__init__() if description is not None: self.description = description self.response = response @classmethod def wrap( cls, exception: t.Type[BaseException], name: t.Optional[str] = None ) -> t.Type["HTTPException"]: """Create an exception that is a subclass of the calling HTTP exception and the ``exception`` argument. The first argument to the class will be passed to the wrapped ``exception``, the rest to the HTTP exception. If ``e.args`` is not empty and ``e.show_exception`` is ``True``, the wrapped exception message is added to the HTTP error description. .. deprecated:: 2.0 Will be removed in Werkzeug 2.1. Create a subclass manually instead. .. versionchanged:: 0.15.5 The ``show_exception`` attribute controls whether the description includes the wrapped exception message. .. versionchanged:: 0.15.0 The description includes the wrapped exception message. """ warnings.warn( "'HTTPException.wrap' is deprecated and will be removed in" " Werkzeug 2.1. 
Create a subclass manually instead.", DeprecationWarning, stacklevel=2, ) class newcls(cls, exception): # type: ignore _description = cls.description show_exception = False def __init__( self, arg: t.Optional[t.Any] = None, *args: t.Any, **kwargs: t.Any ) -> None: super().__init__(*args, **kwargs) if arg is None: exception.__init__(self) else: exception.__init__(self, arg) @property def description(self) -> str: if self.show_exception: return ( f"{self._description}\n" f"{exception.__name__}: {exception.__str__(self)}" ) return self._description # type: ignore @description.setter def description(self, value: str) -> None: self._description = value newcls.__module__ = sys._getframe(1).f_globals["__name__"] name = name or cls.__name__ + exception.__name__ newcls.__name__ = newcls.__qualname__ = name return newcls @property def name(self) -> str: """The status name.""" from .http import HTTP_STATUS_CODES return HTTP_STATUS_CODES.get(self.code, "Unknown Error") # type: ignore def get_description( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> str: """Get the description.""" if self.description is None: description = "" elif not isinstance(self.description, str): description = str(self.description) else: description = self.description description = escape(description).replace("\n", "<br>") return f"<p>{description}</p>" def get_body( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> str: """Get the HTML body.""" return ( '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n' f"<title>{self.code} {escape(self.name)}</title>\n" f"<h1>{escape(self.name)}</h1>\n" f"{self.get_description(environ)}\n" ) def get_headers( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> t.List[t.Tuple[str, str]]: """Get a list of headers.""" return [("Content-Type", "text/html; charset=utf-8")] def get_response( self, environ: t.Optional[t.Union["WSGIEnvironment", "WSGIRequest"]] = None, scope: t.Optional[dict] = None, ) -> "Response": """Get a response object. If one was passed to the exception it's returned directly. :param environ: the optional environ for the request. This can be used to modify the response depending on how the request looked like. :return: a :class:`Response` object or a subclass thereof. """ from .wrappers.response import Response as WSGIResponse # noqa: F811 if self.response is not None: return self.response if environ is not None: environ = _get_environ(environ) headers = self.get_headers(environ, scope) return WSGIResponse(self.get_body(environ, scope), self.code, headers) def __call__( self, environ: "WSGIEnvironment", start_response: "StartResponse" ) -> t.Iterable[bytes]: """Call the exception as WSGI application. :param environ: the WSGI environment. :param start_response: the response callable provided by the WSGI server. """ response = t.cast("WSGIResponse", self.get_response(environ)) return response(environ, start_response) def __str__(self) -> str: code = self.code if self.code is not None else "???" return f"{code} {self.name}: {self.description}" def __repr__(self) -> str: code = self.code if self.code is not None else "???" return f"<{type(self).__name__} '{code}: {self.name}'>" class BadRequest(HTTPException): """*400* `Bad Request` Raise if the browser sends something to the application the application or server cannot handle. """ code = 400 description = ( "The browser (or proxy) sent a request that this server could " "not understand." 
) class BadRequestKeyError(BadRequest, KeyError): """An exception that is used to signal both a :exc:`KeyError` and a :exc:`BadRequest`. Used by many of the datastructures. """ _description = BadRequest.description #: Show the KeyError along with the HTTP error message in the #: response. This should be disabled in production, but can be #: useful in a debug mode. show_exception = False def __init__(self, arg: t.Optional[str] = None, *args: t.Any, **kwargs: t.Any): super().__init__(*args, **kwargs) if arg is None: KeyError.__init__(self) else: KeyError.__init__(self, arg) @property # type: ignore def description(self) -> str: # type: ignore if self.show_exception: return ( f"{self._description}\n" f"{KeyError.__name__}: {KeyError.__str__(self)}" ) return self._description @description.setter def description(self, value: str) -> None: self._description = value class ClientDisconnected(BadRequest): """Internal exception that is raised if Werkzeug detects a disconnected client. Since the client is already gone at that point attempting to send the error message to the client might not work and might ultimately result in another exception in the server. Mainly this is here so that it is silenced by default as far as Werkzeug is concerned. Since disconnections cannot be reliably detected and are unspecified by WSGI to a large extent this might or might not be raised if a client is gone. .. versionadded:: 0.8 """ class SecurityError(BadRequest): """Raised if something triggers a security error. This is otherwise exactly like a bad request error. .. versionadded:: 0.9 """ class BadHost(BadRequest): """Raised if the submitted host is badly formatted. .. versionadded:: 0.11.2 """ class Unauthorized(HTTPException): """*401* ``Unauthorized`` Raise if the user is not authorized to access a resource. The ``www_authenticate`` argument should be used to set the ``WWW-Authenticate`` header. This is used for HTTP basic auth and other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate` to create correctly formatted values. Strictly speaking a 401 response is invalid if it doesn't provide at least one value for this header, although real clients typically don't care. :param description: Override the default message used for the body of the response. :param www-authenticate: A single value, or list of values, for the WWW-Authenticate header(s). .. versionchanged:: 2.0 Serialize multiple ``www_authenticate`` items into multiple ``WWW-Authenticate`` headers, rather than joining them into a single value, for better interoperability. .. versionchanged:: 0.15.3 If the ``www_authenticate`` argument is not set, the ``WWW-Authenticate`` header is not set. .. versionchanged:: 0.15.3 The ``response`` argument was restored. .. versionchanged:: 0.15.1 ``description`` was moved back as the first argument, restoring its previous position. .. versionchanged:: 0.15.0 ``www_authenticate`` was added as the first argument, ahead of ``description``. """ code = 401 description = ( "The server could not verify that you are authorized to access" " the URL requested. You either supplied the wrong credentials" " (e.g. a bad password), or your browser doesn't understand" " how to supply the credentials required." 
) def __init__( self, description: t.Optional[str] = None, response: t.Optional["Response"] = None, www_authenticate: t.Optional[ t.Union["WWWAuthenticate", t.Iterable["WWWAuthenticate"]] ] = None, ) -> None: super().__init__(description, response) from .datastructures import WWWAuthenticate if isinstance(www_authenticate, WWWAuthenticate): www_authenticate = (www_authenticate,) self.www_authenticate = www_authenticate def get_headers( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> t.List[t.Tuple[str, str]]: headers = super().get_headers(environ, scope) if self.www_authenticate: headers.extend(("WWW-Authenticate", str(x)) for x in self.www_authenticate) return headers class Forbidden(HTTPException): """*403* `Forbidden` Raise if the user doesn't have the permission for the requested resource but was authenticated. """ code = 403 description = ( "You don't have the permission to access the requested" " resource. It is either read-protected or not readable by the" " server." ) class NotFound(HTTPException): """*404* `Not Found` Raise if a resource does not exist and never existed. """ code = 404 description = ( "The requested URL was not found on the server. If you entered" " the URL manually please check your spelling and try again." ) class MethodNotAllowed(HTTPException): """*405* `Method Not Allowed` Raise if the server used a method the resource does not handle. For example `POST` if the resource is view only. Especially useful for REST. The first argument for this exception should be a list of allowed methods. Strictly speaking the response would be invalid if you don't provide valid methods in the header which you can do with that list. """ code = 405 description = "The method is not allowed for the requested URL." def __init__( self, valid_methods: t.Optional[t.Iterable[str]] = None, description: t.Optional[str] = None, response: t.Optional["Response"] = None, ) -> None: """Takes an optional list of valid http methods starting with werkzeug 0.3 the list will be mandatory.""" super().__init__(description=description, response=response) self.valid_methods = valid_methods def get_headers( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> t.List[t.Tuple[str, str]]: headers = super().get_headers(environ, scope) if self.valid_methods: headers.append(("Allow", ", ".join(self.valid_methods))) return headers class NotAcceptable(HTTPException): """*406* `Not Acceptable` Raise if the server can't return any content conforming to the `Accept` headers of the client. """ code = 406 description = ( "The resource identified by the request is only capable of" " generating response entities which have content" " characteristics not acceptable according to the accept" " headers sent in the request." ) class RequestTimeout(HTTPException): """*408* `Request Timeout` Raise to signalize a timeout. """ code = 408 description = ( "The server closed the network connection because the browser" " didn't finish the request within the specified time." ) class Conflict(HTTPException): """*409* `Conflict` Raise to signal that a request cannot be completed because it conflicts with the current state on the server. .. versionadded:: 0.7 """ code = 409 description = ( "A conflict happened while processing the request. The" " resource might have been modified while the request was being" " processed." ) class Gone(HTTPException): """*410* `Gone` Raise if a resource existed previously and went away without new location. 
""" code = 410 description = ( "The requested URL is no longer available on this server and" " there is no forwarding address. If you followed a link from a" " foreign page, please contact the author of this page." ) class LengthRequired(HTTPException): """*411* `Length Required` Raise if the browser submitted data but no ``Content-Length`` header which is required for the kind of processing the server does. """ code = 411 description = ( "A request with this method requires a valid <code>Content-" "Length</code> header." ) class PreconditionFailed(HTTPException): """*412* `Precondition Failed` Status code used in combination with ``If-Match``, ``If-None-Match``, or ``If-Unmodified-Since``. """ code = 412 description = ( "The precondition on the request for the URL failed positive evaluation." ) class RequestEntityTooLarge(HTTPException): """*413* `Request Entity Too Large` The status code one should return if the data submitted exceeded a given limit. """ code = 413 description = "The data value transmitted exceeds the capacity limit." class RequestURITooLarge(HTTPException): """*414* `Request URI Too Large` Like *413* but for too long URLs. """ code = 414 description = ( "The length of the requested URL exceeds the capacity limit for" " this server. The request cannot be processed." ) class UnsupportedMediaType(HTTPException): """*415* `Unsupported Media Type` The status code returned if the server is unable to handle the media type the client transmitted. """ code = 415 description = ( "The server does not support the media type transmitted in the request." ) class RequestedRangeNotSatisfiable(HTTPException): """*416* `Requested Range Not Satisfiable` The client asked for an invalid part of the file. .. versionadded:: 0.7 """ code = 416 description = "The server cannot provide the requested range." def __init__( self, length: t.Optional[int] = None, units: str = "bytes", description: t.Optional[str] = None, response: t.Optional["Response"] = None, ) -> None: """Takes an optional `Content-Range` header value based on ``length`` parameter. """ super().__init__(description=description, response=response) self.length = length self.units = units def get_headers( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> t.List[t.Tuple[str, str]]: headers = super().get_headers(environ, scope) if self.length is not None: headers.append(("Content-Range", f"{self.units} */{self.length}")) return headers class ExpectationFailed(HTTPException): """*417* `Expectation Failed` The server cannot meet the requirements of the Expect request-header. .. versionadded:: 0.7 """ code = 417 description = "The server could not meet the requirements of the Expect header" class ImATeapot(HTTPException): """*418* `I'm a teapot` The server should return this if it is a teapot and someone attempted to brew coffee with it. .. versionadded:: 0.7 """ code = 418 description = "This server is a teapot, not a coffee machine" class UnprocessableEntity(HTTPException): """*422* `Unprocessable Entity` Used if the request is well formed, but the instructions are otherwise incorrect. """ code = 422 description = ( "The request was well-formed but was unable to be followed due" " to semantic errors." ) class Locked(HTTPException): """*423* `Locked` Used if the resource that is being accessed is locked. """ code = 423 description = "The resource that is being accessed is locked." 
class FailedDependency(HTTPException): """*424* `Failed Dependency` Used if the method could not be performed on the resource because the requested action depended on another action and that action failed. """ code = 424 description = ( "The method could not be performed on the resource because the" " requested action depended on another action and that action" " failed." ) class PreconditionRequired(HTTPException): """*428* `Precondition Required` The server requires this request to be conditional, typically to prevent the lost update problem, which is a race condition between two or more clients attempting to update a resource through PUT or DELETE. By requiring each client to include a conditional header ("If-Match" or "If-Unmodified- Since") with the proper value retained from a recent GET request, the server ensures that each client has at least seen the previous revision of the resource. """ code = 428 description = ( "This request is required to be conditional; try using" ' "If-Match" or "If-Unmodified-Since".' ) class _RetryAfter(HTTPException): """Adds an optional ``retry_after`` parameter which will set the ``Retry-After`` header. May be an :class:`int` number of seconds or a :class:`~datetime.datetime`. """ def __init__( self, description: t.Optional[str] = None, response: t.Optional["Response"] = None, retry_after: t.Optional[t.Union[datetime, int]] = None, ) -> None: super().__init__(description, response) self.retry_after = retry_after def get_headers( self, environ: t.Optional["WSGIEnvironment"] = None, scope: t.Optional[dict] = None, ) -> t.List[t.Tuple[str, str]]: headers = super().get_headers(environ, scope) if self.retry_after: if isinstance(self.retry_after, datetime): from .http import http_date value = http_date(self.retry_after) else: value = str(self.retry_after) headers.append(("Retry-After", value)) return headers class TooManyRequests(_RetryAfter): """*429* `Too Many Requests` The server is limiting the rate at which this user receives responses, and this request exceeds that rate. (The server may use any convenient method to identify users and their request rates). The server may include a "Retry-After" header to indicate how long the user should wait before retrying. :param retry_after: If given, set the ``Retry-After`` header to this value. May be an :class:`int` number of seconds or a :class:`~datetime.datetime`. .. versionchanged:: 1.0 Added ``retry_after`` parameter. """ code = 429 description = "This user has exceeded an allotted request count. Try again later." class RequestHeaderFieldsTooLarge(HTTPException): """*431* `Request Header Fields Too Large` The server refuses to process the request because the header fields are too large. One or more individual fields may be too large, or the set of all headers is too large. """ code = 431 description = "One or more header fields exceeds the maximum size." class UnavailableForLegalReasons(HTTPException): """*451* `Unavailable For Legal Reasons` This status code indicates that the server is denying access to the resource as a consequence of a legal demand. """ code = 451 description = "Unavailable for legal reasons." class InternalServerError(HTTPException): """*500* `Internal Server Error` Raise if an internal server error occurred. This is a good fallback if an unknown error occurred in the dispatcher. .. versionchanged:: 1.0.0 Added the :attr:`original_exception` attribute. """ code = 500 description = ( "The server encountered an internal error and was unable to" " complete your request. 
Either the server is overloaded or" " there is an error in the application." ) def __init__( self, description: t.Optional[str] = None, response: t.Optional["Response"] = None, original_exception: t.Optional[BaseException] = None, ) -> None: #: The original exception that caused this 500 error. Can be #: used by frameworks to provide context when handling #: unexpected errors. self.original_exception = original_exception super().__init__(description=description, response=response) class NotImplemented(HTTPException): """*501* `Not Implemented` Raise if the application does not support the action requested by the browser. """ code = 501 description = "The server does not support the action requested by the browser." class BadGateway(HTTPException): """*502* `Bad Gateway` If you do proxying in your application you should return this status code if you received an invalid response from the upstream server it accessed in attempting to fulfill the request. """ code = 502 description = ( "The proxy server received an invalid response from an upstream server." ) class ServiceUnavailable(_RetryAfter): """*503* `Service Unavailable` Status code you should return if a service is temporarily unavailable. :param retry_after: If given, set the ``Retry-After`` header to this value. May be an :class:`int` number of seconds or a :class:`~datetime.datetime`. .. versionchanged:: 1.0 Added ``retry_after`` parameter. """ code = 503 description = ( "The server is temporarily unable to service your request due" " to maintenance downtime or capacity problems. Please try" " again later." ) class GatewayTimeout(HTTPException): """*504* `Gateway Timeout` Status code you should return if a connection to an upstream server times out. """ code = 504 description = "The connection to an upstream server timed out." class HTTPVersionNotSupported(HTTPException): """*505* `HTTP Version Not Supported` The server does not support the HTTP protocol version used in the request. """ code = 505 description = ( "The server does not support the HTTP protocol version used in the request." ) default_exceptions: t.Dict[int, t.Type[HTTPException]] = {} def _find_exceptions() -> None: for obj in globals().values(): try: is_http_exception = issubclass(obj, HTTPException) except TypeError: is_http_exception = False if not is_http_exception or obj.code is None: continue old_obj = default_exceptions.get(obj.code, None) if old_obj is not None and issubclass(obj, old_obj): continue default_exceptions[obj.code] = obj _find_exceptions() del _find_exceptions class Aborter: """When passed a dict of code -> exception items it can be used as callable that raises exceptions. If the first argument to the callable is an integer it will be looked up in the mapping, if it's a WSGI application it will be raised in a proxy exception. The rest of the arguments are forwarded to the exception constructor. 
""" def __init__( self, mapping: t.Optional[t.Dict[int, t.Type[HTTPException]]] = None, extra: t.Optional[t.Dict[int, t.Type[HTTPException]]] = None, ) -> None: if mapping is None: mapping = default_exceptions self.mapping = dict(mapping) if extra is not None: self.mapping.update(extra) def __call__( self, code: t.Union[int, "Response"], *args: t.Any, **kwargs: t.Any ) -> "te.NoReturn": from .sansio.response import Response if isinstance(code, Response): raise HTTPException(response=code) if code not in self.mapping: raise LookupError(f"no exception for {code!r}") raise self.mapping[code](*args, **kwargs) def abort( status: t.Union[int, "Response"], *args: t.Any, **kwargs: t.Any ) -> "te.NoReturn": """Raises an :py:exc:`HTTPException` for the given status code or WSGI application. If a status code is given, it will be looked up in the list of exceptions and will raise that exception. If passed a WSGI application, it will wrap it in a proxy WSGI exception and raise that:: abort(404) # 404 Not Found abort(Response('Hello World')) """ _aborter(status, *args, **kwargs) _aborter: Aborter = Aborter()
PypiClean
/MJOLNIR-1.3.1.tar.gz/MJOLNIR-1.3.1/test/Detector.py
import numpy as np from MJOLNIR.Geometry.Detector import Detector, TubeDetector1D import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt def test_init(): GenericDetector = Detector(position=(0.0,1.0,0.0),direction=(1.0,0,0)) assert(np.all(GenericDetector.position==np.array([0.0,1.0,0.0]))) assert(np.all(GenericDetector.direction==(1.0,0.0,0.0))) def test_Generic_plot(): GenericDetector = Detector(position=(0.0,1.0,0.0),direction=(1.0,0,0)) plt.ioff() fig = plt.figure() ax = fig.add_subplot(projection='3d') try: GenericDetector.plot(ax) assert False except NotImplementedError: assert True def test_TubeDetector_init(): TubeDetector = TubeDetector1D(position=(0.0,1.0,0.0),direction=(1.0,0,0),pixels=20,length=0.3,diameter=0.025,split=[0,57,57*2]) assert(np.all(TubeDetector.position==np.array([0.0,1.0,0.0]))) assert(np.all(TubeDetector.direction==(1.0,0.0,0.0))) assert(TubeDetector.pixels==20) assert(TubeDetector.length==0.3) assert(TubeDetector.diameter==0.025) assert(np.all(TubeDetector.split==np.array([0,57,57*2]))) def test_TubeDetector_pixels(): TubeDetector = TubeDetector1D(position=(0.0,1.0,0.0),direction=(1.0,0,0)) try: TubeDetector.pixels=0 assert False except AttributeError: assert True def test_TubeDetector_length(): TubeDetector = TubeDetector1D(position=(0.0,1.0,0.0),direction=(1.0,0,0)) try: TubeDetector.length=-0.1 assert False except AttributeError: assert True def test_TubeDetector_diameter(): TubeDetector = TubeDetector1D(position=(0.0,1.0,0.0),direction=(1.0,0,0)) try: TubeDetector.diameter=-0.1 assert False except AttributeError: assert True def test_TubeDetector_split(): TubeDetector = TubeDetector1D(position=(0.0,1.0,0.0),direction=(1.0,0,0),pixels=100) try: TubeDetector.split=-0.1 assert False except AttributeError: assert True TubeDetector.split=[50,60,100] pixelPos = TubeDetector.getPixelPositions() assert(len(pixelPos)==2) assert(len(pixelPos[0])==10) def test_TubeDetector1D_plot(): TubeDetector = TubeDetector1D(position=(0.0,1.0,0.0),direction=(1.0,0,0)) plt.ioff() fig = plt.figure() ax = fig.add_subplot(projection='3d') TubeDetector.plot(ax) def test_TubeDetector1D_getPixelPositions(): TubeDetector = TubeDetector1D(position=(1.0,0.0,1.0),direction=(1.0,0,0),length=0.5,pixels=5) positions = TubeDetector.getPixelPositions() AssumedPositions = np.array([[0.8,0,1],[0.9,0,1],[1.0,0,1],[1.1,0,1],[1.2,0,1]]) print(positions) print(AssumedPositions) assert(np.all(AssumedPositions==positions))
PypiClean
/Mesa_Adapted-0.8.7.3-py3-none-any.whl/mesa_adapted/visualization/TextVisualization.py
class TextVisualization:
    """ ASCII-Only visualization of a model.

    Properties:

        model: The underlying model object to be visualized.
        elements: List of visualization elements, which will be rendered
            in the order they are added.

    """

    def __init__(self, model):
        """ Create a new Text Visualization object. """
        self.model = model
        self.elements = []

    def render(self):
        """ Render all the text elements, in order. """
        for element in self.elements:
            print(element)

    def step(self):
        """ Advance the model by a step and print the results. """
        self.model.step()
        self.render()


class TextElement:
    """ Base class for all TextElements to render.

    Methods:
        render: 'Renders' some data into ASCII and returns.
        __str__: Displays render() by default.
    """

    def __init__(self):
        pass

    def render(self):
        """ Render the element as text. """
        return "Placeholder!"

    def __str__(self):
        return self.render()


class TextData(TextElement):
    """ Prints the value of one particular variable from the base model. """

    def __init__(self, model, var_name):
        """ Create a new data renderer. """
        self.model = model
        self.var_name = var_name

    def render(self):
        return self.var_name + ": " + str(getattr(self.model, self.var_name))


class TextGrid(TextElement):
    """ Class for creating an ASCII visualization of a basic grid object.

    By default, assume that each cell is represented by one character, and
    that empty cells are rendered as ' ' characters. When printed, the
    TextGrid results in a width x height grid of ascii characters.

    Properties:
        grid: The underlying grid object.
    """

    grid = None

    def __init__(self, grid, converter):
        """ Create a new ASCII grid visualization.

        Args:
            grid: The underlying Grid object.
            converter: function for converting the content of each cell
                to ascii. Takes the contents of a cell, and returns a
                single character.
        """
        self.grid = grid
        self.converter = converter

    def render(self):
        """ What to show when printed. """
        viz = ""
        for y in range(self.grid.height):
            for x in range(self.grid.width):
                c = self.grid[y][x]
                if c is None:
                    viz += " "
                else:
                    viz += self.converter(c)
            viz += "\n"
        return viz
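# --- Illustrative usage sketch (not part of the original module). ---
# A small, self-contained example of wiring the elements above to a toy
# model; the ``ToyGrid``/``ToyModel`` classes are invented for illustration
# and are not part of mesa_adapted.
if __name__ == "__main__":

    class ToyGrid:
        """A 2D grid exposing ``width``, ``height`` and ``grid[y][x]`` access."""

        def __init__(self, width, height):
            self.width = width
            self.height = height
            self._cells = [[None] * width for _ in range(height)]

        def __getitem__(self, y):
            return self._cells[y]

    class ToyModel:
        """A model with a ``step()`` method and a variable to report."""

        def __init__(self):
            self.steps = 0
            self.grid = ToyGrid(4, 2)
            self.grid[0][1] = "agent"

        def step(self):
            self.steps += 1

    model = ToyModel()
    viz = TextVisualization(model)
    viz.elements.append(TextData(model, "steps"))
    viz.elements.append(TextGrid(model.grid, lambda cell: "A"))
    viz.step()  # advances the model, then prints "steps: 1" and the ASCII grid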
PypiClean
/CWR-API-0.0.40.tar.gz/CWR-API-0.0.40/cwr/grammar/factory/rule.py
from abc import ABCMeta, abstractmethod import pyparsing as pp from cwr.grammar.factory.config import rule_at_least """ Rules factories. """ __author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' __status__ = 'Development' class RuleFactory(object, metaclass=ABCMeta): def __init__(self): pass @abstractmethod def get_rule(self, rule_id): raise NotImplementedError("The get_rule method is not implemented") class FieldRuleFactory(RuleFactory): """ Factory for acquiring field rules. """ def __init__(self, field_configs, adapters): super(FieldRuleFactory, self).__init__() # Fields already created self._fields = {} # Field adapters being used self._adapters = adapters # Configuration for creating the fields self._field_configs = field_configs def get_rule(self, field_id): """ Returns the rule for the field identified by the id. If it is set as not being compulsory, the rule will be adapted to accept string composed only of white characters. :param field_id: unique id in the system for the field :return: the rule of a field """ if field_id in self._fields: # Field already exists field = self._fields[field_id] else: # Field does not exist # It is created field = self._create_field(field_id) # Field is saved self._fields[field_id] = field return field def _create_field(self, field_id): """ Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field """ # Field configuration info config = self._field_configs[field_id] adapter = self._adapters[config['type']] if 'name' in config: name = config['name'] else: name = None if 'size' in config: columns = config['size'] else: columns = None if 'values' in config: values = config['values'] else: values = None field = adapter.get_field(name, columns, values) if 'results_name' in config: field = field.setResultsName(config['results_name']) else: field = field.setResultsName(field_id) return field class DefaultRuleFactory(RuleFactory): def __init__(self, record_configs, field_rule_factory, optional_terminal_rule_decorator, decorators=None): super(DefaultRuleFactory, self).__init__() self._debug = False # Rules already created self._rules = {} # Configuration for creating the record self._record_configs = record_configs self._field_rule_factory = field_rule_factory self._optional_field_rule_decorator = optional_terminal_rule_decorator if decorators: self._decorators = decorators else: self._decorators = {} def get_rule(self, rule_id): if rule_id in self._rules: rule = self._rules[rule_id] else: rule = self._build_rule(rule_id) self._rules[rule_id] = rule return rule def _build_rule(self, rule_id): rule_config = self._record_configs[rule_id] rule_type = rule_config.rule_type if rule_config.rules: rule = self._process_rules(rule_config.rules, pp.And) else: rule = self._build_terminal_rule(rule_config) if rule_type in self._decorators: rule = self._decorators[rule_type].decorate(rule, rule_config) if 'results_name' in rule_config: rule = rule.setResultsName(rule_config['results_name']) else: rule = rule.setResultsName(rule_id) rule.setName(rule_id) if self._debug: rule.setDebug() return rule def _process_rules(self, rules_data, strategy): sequence = [] for rule in rules_data: if rule.rules: rule = self._process_rules_group(rule) else: rule = self._build_terminal_rule(rule) if self._debug: rule.setDebug() sequence.append(rule) return strategy(sequence) def _process_rules_group(self, rules): group = None group_type = rules.list_type data = rules.rules if group_type == 'sequence': group = 
self._process_rules(data, pp.And) elif group_type == 'option': group = self._process_rules(data, pp.MatchFirst) elif group_type == 'optional': group = pp.Optional(self._process_rules(data, pp.And)) return group def _build_terminal_rule(self, rule): rule_id = rule.rule_name modifiers = rule.rule_options rule_type = rule.rule_type # TODO: This is a patch for an error which should not be happening try: modifiers = modifiers.asList() except AttributeError: modifiers = [] if rule_type == 'field': rule = self._field_rule_factory.get_rule(rule_id) if 'compulsory' not in modifiers: rule = self._optional_field_rule_decorator.decorate(rule, rule_id) rule.setName(rule_id) else: rule = self.get_rule(rule_id) if len(modifiers) > 0: rule = self._apply_modifiers(rule, modifiers) return rule @staticmethod def _apply_modifiers(rule, modifiers): if 'grouped' in modifiers: rule = pp.Group(rule) if 'optional' in modifiers: rule = pp.Optional(rule) else: for modifier in modifiers: if modifier.startswith('at_least'): times = rule_at_least.parseString(modifier)[0] if times > 0: rule_multiple = rule for _ in range(1, times): rule_multiple = rule_multiple + rule rule = rule_multiple + pp.ZeroOrMore(rule) else: rule = pp.Optional(pp.ZeroOrMore(rule)) return rule
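# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of how ``FieldRuleFactory`` resolves a field rule from a
# configuration dictionary. The ``AlphanumAdapter`` class and the 'alphanum'
# type name are hypothetical stand-ins for the real CWR field adapters.
if __name__ == "__main__":

    class AlphanumAdapter:
        def get_field(self, name, columns, values):
            # A fixed-width field of ``columns`` arbitrary characters.
            return pp.Regex(".{%d}" % columns).setName(name)

    field_configs = {
        "record_type": {
            "type": "alphanum",
            "name": "Record Type",
            "size": 3,
            "results_name": "record_type",
        }
    }

    factory = FieldRuleFactory(field_configs, {"alphanum": AlphanumAdapter()})
    rule = factory.get_rule("record_type")
    print(rule.parseString("HDR")["record_type"])  # -> 'HDR'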
PypiClean
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojo/i18n.js
if(!dojo._hasResource["dojo.i18n"]){ dojo._hasResource["dojo.i18n"]=true; dojo.provide("dojo.i18n"); dojo.i18n.getLocalization=function(_1,_2,_3){ _3=dojo.i18n.normalizeLocale(_3); var _4=_3.split("-"); var _5=[_1,"nls",_2].join("."); var _6=dojo._loadedModules[_5]; if(_6){ var _7; for(var i=_4.length;i>0;i--){ var _8=_4.slice(0,i).join("_"); if(_6[_8]){ _7=_6[_8]; break; } } if(!_7){ _7=_6.ROOT; } if(_7){ var _9=function(){ }; _9.prototype=_7; return new _9(); } } throw new Error("Bundle not found: "+_2+" in "+_1+" , locale="+_3); }; dojo.i18n.normalizeLocale=function(_a){ var _b=_a?_a.toLowerCase():dojo.locale; if(_b=="root"){ _b="ROOT"; } return _b; }; dojo.i18n._requireLocalization=function(_c,_d,_e,_f){ var _10=dojo.i18n.normalizeLocale(_e); var _11=[_c,"nls",_d].join("."); var _12=""; if(_f){ var _13=_f.split(","); for(var i=0;i<_13.length;i++){ if(_10["indexOf"](_13[i])==0){ if(_13[i].length>_12.length){ _12=_13[i]; } } } if(!_12){ _12="ROOT"; } } var _14=_f?_12:_10; var _15=dojo._loadedModules[_11]; var _16=null; if(_15){ if(dojo.config.localizationComplete&&_15._built){ return; } var _17=_14.replace(/-/g,"_"); var _18=_11+"."+_17; _16=dojo._loadedModules[_18]; } if(!_16){ _15=dojo["provide"](_11); var _19=dojo._getModuleSymbols(_c); var _1a=_19.concat("nls").join("/"); var _1b; dojo.i18n._searchLocalePath(_14,_f,function(loc){ var _1c=loc.replace(/-/g,"_"); var _1d=_11+"."+_1c; var _1e=false; if(!dojo._loadedModules[_1d]){ dojo["provide"](_1d); var _1f=[_1a]; if(loc!="ROOT"){ _1f.push(loc); } _1f.push(_d); var _20=_1f.join("/")+".js"; _1e=dojo._loadPath(_20,null,function(_21){ var _22=function(){ }; _22.prototype=_1b; _15[_1c]=new _22(); for(var j in _21){ _15[_1c][j]=_21[j]; } }); }else{ _1e=true; } if(_1e&&_15[_1c]){ _1b=_15[_1c]; }else{ _15[_1c]=_1b; } if(_f){ return true; } }); } if(_f&&_10!=_12){ _15[_10.replace(/-/g,"_")]=_15[_12.replace(/-/g,"_")]; } }; (function(){ var _23=dojo.config.extraLocale; if(_23){ if(!_23 instanceof Array){ _23=[_23]; } var req=dojo.i18n._requireLocalization; dojo.i18n._requireLocalization=function(m,b,_24,_25){ req(m,b,_24,_25); if(_24){ return; } for(var i=0;i<_23.length;i++){ req(m,b,_23[i],_25); } }; } })(); dojo.i18n._searchLocalePath=function(_26,_27,_28){ _26=dojo.i18n.normalizeLocale(_26); var _29=_26.split("-"); var _2a=[]; for(var i=_29.length;i>0;i--){ _2a.push(_29.slice(0,i).join("-")); } _2a.push(false); if(_27){ _2a.reverse(); } for(var j=_2a.length-1;j>=0;j--){ var loc=_2a[j]||"ROOT"; var _2b=_28(loc); if(_2b){ break; } } }; dojo.i18n._preloadLocalizations=function(_2c,_2d){ function _2e(_2f){ _2f=dojo.i18n.normalizeLocale(_2f); dojo.i18n._searchLocalePath(_2f,true,function(loc){ for(var i=0;i<_2d.length;i++){ if(_2d[i]==loc){ dojo["require"](_2c+"_"+loc); return true; } } return false; }); }; _2e(); var _30=dojo.config.extraLocale||[]; for(var i=0;i<_30.length;i++){ _2e(_30[i]); } }; }
PypiClean
/GenMotion-0.0.4-py3-none-any.whl/genmotion/algorithm/humanmotionrnn/data_utils.py
import numpy as np import torch import copy from .params import * def readCSVasFloat(filename): """ Borrowed from SRNN code. Reads a csv and returns a float matrix. https://github.com/asheshjain399/NeuralModels/blob/master/neuralmodels/utils.py#L34 Args filename: string. Path to the csv file Returns returnArray: the read data in a float32 matrix """ returnArray = [] lines = open(filename).readlines() for line in lines: line = line.strip().split(',') if len(line) > 0: returnArray.append(np.array([np.float32(x) for x in line])) returnArray = np.array(returnArray) return returnArray #!/usr/bin/python # -*- coding: utf-8 -*- def load_data( path_to_dataset, subjects, actions, one_hot, ): """ Borrowed from SRNN code. This is how the SRNN code reads the provided .txt files https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/processdata.py#L270 Args path_to_dataset: string. directory where the data resides subjects: list of numbers. The subjects to load actions: list of string. The actions to load one_hot: Whether to add a one-hot encoding to the data Returns trainData: dictionary with k:v k=(subject, action, subaction, 'even'), v=(nxd) un-normalized data completeData: nxd matrix with all the data. Used to normlization stats """ nactions = len(actions) trainData = {} completeData = [] for subj in subjects: for action_idx in np.arange(len(actions)): action = actions[action_idx] for subact in [1, 2]: # subactions print ('Reading subject {0}, action {1}, subaction {2}'.format(subj, action, subact)) filename = \ '{0}/S{1}/{2}_{3}.txt'.format(path_to_dataset, subj, action, subact) action_sequence = readCSVasFloat(filename) (n, d) = action_sequence.shape even_list = range(0, n, 2) if one_hot: # Add a one-hot encoding at the end of the representation the_sequence = np.zeros((len(even_list), d + nactions), dtype=float) the_sequence[:, 0:d] = action_sequence[even_list, :] the_sequence[:, d + action_idx] = 1 trainData[(subj, action, subact, 'even')] = \ the_sequence else: trainData[(subj, action, subact, 'even')] = \ action_sequence[even_list, :] if len(completeData) == 0: completeData = copy.deepcopy(action_sequence) else: completeData = np.append(completeData, action_sequence, axis=0) return (trainData, completeData) def normalize_data( data, data_mean, data_std, dim_to_use, actions, one_hot ): """ Normalize input data by removing unused dimensions, subtracting the mean and dividing by the standard deviation Args data: nx99 matrix with data to normalize data_mean: vector of mean used to normalize the data data_std: vector of standard deviation used to normalize the data dim_to_use: vector with dimensions used by the model actions: list of strings with the encoded actions one_hot: whether the data comes with one-hot encoding Returns data_out: the passed data matrix, but normalized """ data_out = {} nactions = len(actions) if not one_hot: # No one-hot encoding... no need to do anything special for key in data.keys(): data_out[ key ] = np.divide( (data[key] - data_mean), data_std ) data_out[ key ] = data_out[ key ][ :, dim_to_use ] else: # TODO hard-coding 99 dimensions for un-normalized human poses for key in data.keys(): data_out[ key ] = np.divide( (data[key][:, 0:99] - data_mean), data_std ) data_out[ key ] = data_out[ key ][ :, dim_to_use ] data_out[ key ] = np.hstack( (data_out[key], data[key][:,-nactions:]) ) return data_out def normalization_stats(completeData): """" Also borrowed for SRNN code. Computes mean, stdev and dimensions to ignore. 
https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/CRFProblems/H3.6m/processdata.py#L33 Args completeData: nx99 matrix with data to normalize Returns data_mean: vector of mean used to normalize the data data_std: vector of standard deviation used to normalize the data dimensions_to_ignore: vector with dimensions not used by the model dimensions_to_use: vector with dimensions used by the model """ data_mean = np.mean(completeData, axis=0) data_std = np.std(completeData, axis=0) dimensions_to_ignore = [] dimensions_to_use = [] dimensions_to_ignore.extend( list(np.where(data_std < 1e-4)[0]) ) dimensions_to_use.extend( list(np.where(data_std >= 1e-4)[0]) ) data_std[dimensions_to_ignore] = 1.0 return data_mean, data_std, dimensions_to_ignore, dimensions_to_use class HumanMotionDataset(torch.utils.data.Dataset): # define normalization statistics data_mean = None data_std = None dim_to_ignore = None dim_to_use = None def __init__(self, data_dir, subject_ids, actions, one_hot, is_train = True): super().__init__() # load data self.data_dir = data_dir self.subject_ids = subject_ids self.actions = actions self.one_hot = one_hot self.raw_data, self.complete_dataset = load_data( data_dir, subject_ids, actions, one_hot) # model_input information self.input_size = HUMAN_SIZE + len(self.actions) # compute normalization stats and normalize data self.is_train = is_train # is training dataset or not if self.is_train: HumanMotionDataset.data_mean, HumanMotionDataset.data_std, HumanMotionDataset.dim_to_ignore, HumanMotionDataset.dim_to_use = normalization_stats(self.complete_dataset) self.normalized_data = normalize_data( self.raw_data, self.data_mean, self.data_std, self.dim_to_use, self.actions, self.one_hot ) else: assert HumanMotionDataset.data_mean is not None, "Must initialize training dataset first" self.normalized_data = normalize_data( self.raw_data, HumanMotionDataset.data_mean, HumanMotionDataset.data_std, HumanMotionDataset.dim_to_use, self.actions, self.one_hot ) # calculate dataset length self.all_keys = list(self.normalized_data) self.length = self._calculate_data_length() def _calculate_data_length(self, fix_length = 10000): # calculate the total length of the dataset if fix_length > 0: if self.is_train: return fix_length else: return fix_length // 10 # TODO: full-dataset training, which requires a lot of efforts. # else: def __len__(self): return self.length def __getitem__(self, idx): chosen_key = np.random.choice(len(self.all_keys)) # How many frames in total do we need? total_frames = source_seq_len + target_seq_len the_key = self.all_keys[chosen_key] # Get the number of frames n, _ = self.normalized_data[ the_key ].shape # Sample somewherein the middle idx = np.random.randint( 16, n-total_frames ) # Select the data around the sampled points data_sel = self.normalized_data[ the_key ][idx:idx+total_frames ,:] # Add the data encoder_input = data_sel[0:source_seq_len-1, :] decoder_input = data_sel[source_seq_len-1:source_seq_len + target_seq_len-1, :] decoder_output = data_sel[source_seq_len:, 0:self.input_size] return encoder_input, decoder_input, decoder_output
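# --- Illustrative usage sketch (not part of the original module). ---
# Demonstrates the normalization helpers above on synthetic pose data in
# place of the real H3.6M text files; the array shapes and the dictionary key
# are made up purely for illustration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    fake_poses = rng.randn(500, 99)  # 500 frames of 99-dimensional poses
    fake_poses[:, 10] = 0.0          # one constant (unused) dimension

    mean, std, dim_ignore, dim_use = normalization_stats(fake_poses)
    print(len(dim_ignore), "dimension(s) ignored")  # the constant column

    data = {(1, "walking", 1, "even"): fake_poses}
    normalized = normalize_data(data, mean, std, dim_use,
                                actions=["walking"], one_hot=False)
    print(normalized[(1, "walking", 1, "even")].shape)  # (500, len(dim_use))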
PypiClean
/frmt-2.0.0.tar.gz/frmt-2.0.0/frmt.py
import math from copy import deepcopy try: from shutil import get_terminal_size except ImportError: from backports.shutil_get_terminal_size import get_terminal_size def format_fit(text, width=None, align='<', suffix="..."): """ Fits a piece of text to ``width`` characters by truncating too long text and padding too short text with spaces. Defaults to terminal width. Truncation is indicated by a customizable suffix. ``align`` specifies the alignment of the contents if it is padded, and can be: * ``<`` - Left aligned (default) * ``^`` - Centered * ``>`` - Right aligned """ if width==None: width = get_terminal_size().columns if len(text)>width: if len(suffix)>width: return suffix[len(suffix)-width:] else: return text[:width-len(suffix)]+suffix else: return "{{:{}{{w}}}}".format(align).format(text,w=width) def format_time(seconds): """ Formats a string from time given in seconds. For large times (``abs(seconds) >= 60``) the format is:: dd:hh:mm:ss For small times (``abs(seconds) < 60``), the result is given in 3 significant figures, with units given in seconds and a suitable SI-prefix. """ if not isinstance(seconds, (int, float)): return str(seconds) if math.isnan(seconds): return "-" if abs(seconds)<60: return format_time_small(seconds) else: return format_time_large(seconds) def format_time_small(seconds): """ Same as format_time() but always uses SI-prefix and 3 significant figures. """ if not isinstance(seconds, (int, float)): return str(seconds) if math.isnan(seconds): return "-" if abs(seconds)<1: milliseconds = 1000*seconds if abs(milliseconds)<1: microseconds = 1000*milliseconds if abs(microseconds)<1: nanoseconds = 1000*microseconds if abs(nanoseconds)<0.5: return "0" else: return "{:.0f}ns".format(nanoseconds) elif abs(microseconds)<10: return "{:.2f}us".format(microseconds) elif abs(microseconds)<100: return "{:.1f}us".format(microseconds) else: return "{:.0f}us".format(microseconds) elif abs(milliseconds)<10: return "{:.2f}ms".format(milliseconds) elif abs(milliseconds)<100: return "{:.1f}ms".format(milliseconds) else: return "{:.0f}ms".format(milliseconds) elif abs(seconds)<10: return "{:.2f}s".format(seconds) elif abs(seconds)<100: return "{:.1f}s".format(seconds) else: return "{:.0f}s".format(seconds) def format_time_large(seconds): """ Same as format_time() but always uses the format dd:hh:mm:ss. """ if not isinstance(seconds, (int, float)): return str(seconds) if math.isnan(seconds): return "-" seconds = int(round(seconds)) if abs(seconds)<60: return "{:d}".format(seconds) else: minutes = int(seconds/60) seconds %= 60 if abs(minutes)<60: return "{:d}:{:02d}".format(minutes,seconds) else: hours = int(minutes/60) minutes %= 60 if abs(hours)<24: return "{:d}:{:02d}:{:02d}".format(hours,minutes,seconds) else: days = int(hours/24) hours %= 24 return "{:d}:{:02d}:{:02d}:{:02d}".format( days,hours,minutes,seconds) def format_table(table, align='<', format='{:.3g}', colwidth=None, maxwidth=None, spacing=2, truncate=0, suffix="..." ): """ Formats a table represented as an iterable of iterable into a nice big string suitable for printing. Parameters: ----------- align : string or list of strings Alignment of cell contents. Each character in a string specifies the alignment of one column. * ``<`` - Left aligned (default) * ``^`` - Centered * ``>`` - Right aligned The last alignment is repeated for unspecified columns. If it's a list of strings, each string specifies the alignment of one row. The last string is used repeatedly for unspecified rows. 
format : string/function, or (nested) list of string/function Formats the contents of the cells using the specified function(s) or format string(s). If it's a list of strings/functions each entry specifies formatting for one column, the last entry being used repeatedly for unspecified columns. If it's a list of lists, each sub-list specifies one row, the last sub-list being used repeatedly for unspecified rows. colwidth : int, list of ints or None The width of each column. The last width is used repeatedly for unspecified columns. If ``None`` the width is fitted to the contents. maxwidth : int or None The maximum width of the table. Defaults to terminal width minus 1 if ``None``. If the table would be wider than ``maxwidth`` one of the columns is truncated. spacing : int The spacing between columns truncate : int Which column to truncate if table width would exceed ``maxwidth``. Beware that no columns can have zero or negative width. If for instance 'maxwidth' is 80 and 'colwidth' is [10, 30, 30, 30] with spacing 2 the total width will initially be 10+2+30+2+30+2+30=106. That's 26 characters too much, so a width of 26 will be removed from the truncated column. If 'truncate' is 0, column 0 will have a width of -16 which is not permitted. """ table = list(deepcopy(table)) if not isinstance(align, list): align = [align] if not isinstance(format, list): format = [format] if not isinstance(format[0], list): format = [format] num_cols = len(table[0]) if len(set([len(row) for row in table]))>1: raise ValueError("All rows must have the same number of columns") for i in range(len(table)): table[i] = list(table[i]) colformat = format[min(i,len(format)-1)] for j, cell in enumerate(table[i]): f = colformat[min(j,len(colformat)-1)] if isinstance(f, str): fun = lambda x: f.format(x) else: fun = f try: table[i][j] = fun(cell) except: table[i][j] = str(cell) if colwidth==None: cellwidth = [[len(cell) for cell in row] for row in table] colwidth = list(map(max, zip(*cellwidth))) elif not isinstance(colwidth, list): colwidth = [colwidth] colwidth.extend([colwidth[-1]]*(num_cols-len(colwidth))) if maxwidth==None: maxwidth = get_terminal_size().columns-1 width = sum(colwidth)+spacing*(num_cols-1) if width>maxwidth: colwidth[truncate] -= (width-maxwidth) for j, cw in enumerate(colwidth): if cw<1: raise RuntimeError("Column {} in format_table() has width {}. " "Make sure all columns have width >0. " "Read docstring for further details." .format(j,cw) ) s = '' for i, row in enumerate(table): if i != 0: s += "\n" colalign = align[min(i,len(align)-1)] colformat = format[min(i,len(format)-1)] for j, col in enumerate(row): a = colalign[min(j,len(colalign)-1)] f = colformat[min(j,len(colformat)-1)] w = colwidth[j] if j!=0: s+= ' '*spacing s += format_fit(format_time(col), w, a, suffix) return s def print_table(*args, **kwargs): print(format_table(*args, **kwargs)) def print_time(*args, **kwargs): print(format_time(*args, **kwargs)) def print_time_large(*args, **kwargs): print(format_time_large(*args, **kwargs)) def print_time_small(*args, **kwargs): print(format_time_small(*args, **kwargs)) def print_fit(*args, **kwargs): print(format_fit(*args, **kwargs))
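# --- Illustrative usage sketch (not part of the original module). ---
# A short demonstration of the public helpers above; the values shown in the
# trailing comments follow the formatting rules described in the docstrings.
if __name__ == "__main__":
    print(format_time(0.00324))  # "3.24ms"
    print(format_time(3690))     # "1:01:30"

    header = ["Name", "Time", "Score"]
    rows = [["run A", 12.3, 0.912],
            ["run B", 1312.0, 0.874]]
    # Left-align the first column, right-align the rest; format the "Time"
    # column with format_time and the "Score" column with two decimals.
    print_table([header] + rows, align="<>>",
                format=["{}", format_time, "{:.2f}"])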
PypiClean
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/static/djblets/js/utils/urls.es6.js
Djblets.buildURL = function(options={}) { let url = options.baseURL || ''; /* Build the query string, if any. */ const queryData = options.queryData; if (queryData) { let queryString; if (typeof queryData === 'string') { queryString = queryData; if (queryString.indexOf('?') === 0) { queryString = queryString.substr(1); } } else { queryString = $.param(queryData); } if (queryString) { url += `?${queryString}`; } } /* Append an anchor, if any. */ const anchor = options.anchor; if (anchor) { if (anchor.indexOf('#') === 0) { url += anchor; } else { url += `#${anchor}`; } } return url; }; /** * Parse a query string for key/value pairs. * * This takes a query string in the provided URL and parses it for standard * key/value pairs, returning an object representing those keys and values. * It can handle keys without values and optionally store multiple values * listed for the same key. * * Args: * url (string): * The URL containing a query string to parse. * * options (object, optional): * Options for controlling the parsing. * * Option Args: * allowMultiValue (boolean): * Whether to store multiple values for the same key, if found in * the query string. The value for such a key will be an array of all * values. If ``false`` (the default), only last value for a key will * be stored. * * Returns: * object: * The resulting keys and values representing the query string. * * If there was a query string item that did not have a value (in other * words, no ``=`` was present), then its value will be ``null``. * * If ``options.allowMultiValue`` is ``true``, and a key was used more * than once, then its value will be a list of all values in the query * string for that key. */ Djblets.parseQueryString = function(url, options={}) { const allowMultiValue = options.allowMultiValue; let j = url.indexOf('?'); let queryString; if (j === -1) { /* Assume the whole thing is a query string. */ queryString = url; } else { queryString = url.substr(j + 1); } const query = {}; if (queryString.length === 0) { return query; } const queryParams = queryString.split('&'); for (let i = 0; i < queryParams.length; i++) { const queryParam = queryParams[i]; let key; let value; j = queryParam.indexOf('='); if (j === -1) { key = decodeURIComponent(queryParam); value = null; } else { key = decodeURIComponent(queryParam.substr(0, j)); value = decodeURIComponent(queryParam.substr(j + 1)); } if (allowMultiValue && query.hasOwnProperty(key)) { if (_.isArray(query[key])) { query[key].push(value); } else { query[key] = [query[key], value]; } } else { query[key] = value; } } return query; };
PypiClean
/BiGG_SABIO-0.0.1.tar.gz/BiGG_SABIO-0.0.1/README.rst
Acquire SABIO-RK Kinetics Data for an arbitrary BiGG model
-------------------------------------------------------------------------

|License|

.. |PyPI version| image:: https://img.shields.io/pypi/v/bigg_sabio.svg?logo=PyPI&logoColor=brightgreen
   :target: https://pypi.org/project/bigg_sabio/
   :alt: PyPI version

.. |Actions Status| image:: https://github.com/freiburgermsu/bigg_sabio/workflows/Test%20bigg_sabio/badge.svg
   :target: https://github.com/freiburgermsu/bigg_sabio/actions
   :alt: Actions Status

.. |License| image:: https://img.shields.io/badge/License-MIT-blue.svg
   :target: https://opensource.org/licenses/MIT
   :alt: License

.. |Downloads| image:: https://pepy.tech/badge/bigg_sabio
   :target: https://pepy.tech/project/bigg_sabio
   :alt: Downloads

Reaction kinetics data is a pillar of biochemical research, and particularly of computational biology. Sources of this data, however, are infrequently accessible to programmatic workflows, such as Dynamic Flux Balance Analysis (dFBA), which hinders research progress. The ``BiGG_SABIO`` library attempts to bridge this gap by scraping `SABIO-RK <http://sabio.h-its.org/>`_ kinetics data from any BiGG model-formatted JSON file, which is a powerful capability for metabolic and dFBA researchers. SABIO-RK supports this use of its website in its `statement of webservices <http://sabio.h-its.org/layouts/content/webservices.gsp>`_.

Example Notebooks are available in the examples directory of the `BiGG_SABIO GitHub repository <https://github.com/freiburgermsu/BiGG_SABIO/examples>`_. Please submit errors, inquiries, or suggestions as `GitHub issues <https://github.com/freiburgermsu/BiGG_SABIO/issues>`_ where they can be addressed.

____________

----------------------
Installation
----------------------

``BiGG_SABIO`` is installed in a command prompt, Powershell, Terminal, or Anaconda Command Prompt via ``pip``::

 pip install bigg_sabio

---------------
__init__()
---------------

The scraping is initiated through four arguments:

.. code-block:: python

 import bigg_sabio
 bgsb = bigg_sabio.SABIO_scraping(bigg_model_path, bigg_model_name = None, export_model_content = False, verbose = False)

- *bigg_model_path* ``str``: specifies the path to the JSON file of the BiGG model that will be parsed.
- *bigg_model_name* ``str``: specifies the name of the BiGG model, which will be used to identify the model and name the output folder directory, where ``None`` defaults to the name of the file from the ``bigg_model_path`` parameter.
- *export_model_content* ``bool``: specifies whether the parsed content of the `BiGG model <http://bigg.ucsd.edu/>`_ will be exported.
- *verbose* & *printing* ``bool``: specifies whether scraping details (which are valuable for troubleshooting) and results, respectively, will be printed.

-------------------
complete()
-------------------

The complete scraping process is concisely conducted through a single function, which references the object variables that are defined through the ``__init__()`` function:

.. code-block:: python

 import bigg_sabio
 bgsb = bigg_sabio.SABIO_scraping(bigg_model_path, bigg_model_name = None, export_model_content = False, verbose = False)

 bgsb.complete()

____________

Individual functions
-------------------------------------------------------------------------

The steps of acquiring and processing SABIO data into input files of kinetic data for dFBA simulations can be individually executed on demand. These steps and functions are detailed in the following sections.
-------------------
scrape_bigg_xls()
-------------------

This function is the first step in the BiGG_SABIO workflow, where a Selenium WebDriver is directed through the advanced search options of SABIO and proceeds to download all of the search results that match annotations from the BiGG model. These numerous XLS files are concatenated at the end of the scraping process, and duplicate rows are removed, to yield a complete CSV file of the SABIO kinetics data for the respective BiGG model. The identities and values for each parameter are subsequently scraped, and assembled and downloaded as a separate JSON file.

-------------------
to_fba()
-------------------

This is the final step in the BiGG_SABIO workflow, where the complete assemblage of SABIO kinetics data is refined into a structure that is amenable to the `dFBAy <https://github.com/freiburgermsu/dFBApy>`_ module.

--------------------------------------
Executing the individual functions
--------------------------------------

The individual functions can be executed through the following sequence:

.. code-block:: python

 import bigg_sabio
 bgsb = bigg_sabio.SABIO_scraping(bigg_model_path, bigg_model_name = None, export_model_content = False, verbose = False)

 bgsb.scrape_bigg_xls()
 bgsb.to_fba()

____________

Accessible content
______________________

A multitude of values are stored within the ``SABIO_scraping`` object that can be subsequently referenced and used in a workflow. The complete list of content within the ``SABIO_scraping`` object can be printed through the built-in ``dir()`` function:

.. code-block:: python

 # Scrape data for a BiGG model
 from bigg_sabio import SABIO_scraping
 bgsb = SABIO_scraping(bigg_model_path, bigg_model_name = None, export_model_content = False, verbose = False)

 print(dir(bgsb))

The following list highlights stored content in the ``SABIO_scraping`` object after a completed scrape:

- *model* & *model_contents* ``dict``: The loaded BiGG model and a parsed form of the model, respectively, which are interpreted and guide the scraping of reaction enzymes.
- *sabio_df* ``Pandas.DataFrame``: A concatenated DataFrame that embodies all of the downloaded XLS files from the model enzymes.
- *paths*, *parameters*, & *variables* ``dict``: Dictionaries of 1) the essential paths from the scraping, which may be useful to locate and programmatically access each file; 2) important parameters that were defined for the scraping; and 3) the variable values or files that derived from the scraping, respectively.
- *bigg_to_sabio_metabolites*, *sabio_to_bigg_metabolites*, & *bigg_reactions* ``dict``: Comprehensive dictionaries for the ID codes of BiGG metabolites and reactions, respectively. The ``bigg_to_sabio_metabolites`` dictionary is indexed with keys of BiGG IDs and values of metabolite names that are recognized by SABIO and BiGG, whereas the ``sabio_to_bigg_metabolites`` dictionary is indexed with keys of SABIO metabolite names and values of the corresponding BiGG IDs.
- *driver* & *fp* ``Selenium.Webdriver``: The Firefox browser driver and profile, respectively, that are used programmatically by `Selenium functions <https://selenium-python.readthedocs.io/api.html>`_ to access and navigate the SABIO-RK database website.
- *step_number* ``int``: An indication of the progression within the scraping workflow, which is enumerated in the ``main()`` function of the script.
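The stored content can then be referenced directly once a scrape has finished. The following is a minimal sketch of that pattern; the ``e_coli_core.json`` filename is only a placeholder for a BiGG model file:

.. code-block:: python

 from bigg_sabio import SABIO_scraping

 bgsb = SABIO_scraping(bigg_model_path = 'e_coli_core.json')
 bgsb.complete()

 # the concatenated SABIO-RK kinetics data, as a Pandas DataFrame
 print(bgsb.sabio_df.head())

 # the paths of the exported files from the scraping
 print(bgsb.paths)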
PypiClean
/cowsuper-0.0.2-py3-none-any.whl/cowsuper/examples/datasets/generate_weak_signals.py
import numpy as np import pandas as pd import nltk import re import matplotlib.pyplot as plt from json import JSONDecoder from functools import partial import json from pprint import pprint from bs4 import BeautifulSoup from nltk.tokenize import WordPunctTokenizer from nltk.corpus import stopwords from nltk.stem import PorterStemmer, LancasterStemmer import mxnet as mx # Preprocessing steps stemmer = LancasterStemmer() """ To use glove class, download 'glove.42B.300d.txt' from https://www.kaggle.com/yutanakamura/glove42b300dtxt File is too large to save to github """ df = pd.read_csv('glove.42B.300d.txt', sep=" ", quoting=3, header=None, index_col=0) glove_model = {key: val.values for key, val in df.T.items()} # # # # # # # # # # # Helper Code # # # # # # # # # # # def decodeHTMLencoding(tweets): decoded_tweets = tweets.applymap(lambda tweet: BeautifulSoup(tweet, 'lxml').get_text()) return decoded_tweets def removeStopWords(text): stopw = stopwords.words('english') words = [word for word in text.split() if len(word) > 3 and not word in stopw] # get stems from words for i in range(len(words)): words[i] = stemmer.stem(words[i]) return (" ".join(words)).strip() def cleanTweets(tweets): """ cleans tweets by removing unwanted symbols and text into a workable format :param tweets: tweet data :type tweets: numpy array of strings """ # decode tweets from html tags cleaned_tweets = decodeHTMLencoding(tweets) # remove URLs that starts with http cleaned_tweets = cleaned_tweets.applymap(lambda tweet: re.sub( r'https?:\/\/(www\.)?[-a-zA-Z0–9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0–9@:%_\+.~#?&//=]*)', '', tweet, flags=re.MULTILINE) ) # remove URLs that does not start with http cleaned_tweets = cleaned_tweets.applymap(lambda tweet: re.sub( r'[-a-zA-Z0–9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b([-a-zA-Z0–9@:%_\+.~#?&//=]*)', '', tweet, flags=re.MULTILINE)) # remove @ cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub(r'@[A-Za-z0-9_]+', '', tweet, flags=re.MULTILINE) ) # remove # cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub(r'#[A-Za-z0-9_]+', '', tweet, flags=re.MULTILINE) ) # remove RT cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub('RT ', '', tweet, flags=re.MULTILINE) ) # remove symbols and numbers (i.e keep letters only) cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub("[^a-zA-Z]"," ",tweet, flags=re.MULTILINE) ) #replace consecutive non-ASCII characters with a space cleaned_tweets = cleaned_tweets.applymap( lambda tweet: re.sub(r'[^\x00-\x7F]+'," ",tweet.lower(), flags=re.MULTILINE) ) # This line get rid of some stuff in training data but not labels or weak signals # cleaned_tweets.drop_duplicates(inplace=True) cleaned_tweets.replace('', np.nan, inplace=True) cleaned_tweets.dropna(inplace=True) return cleaned_tweets def get_text_vectors(tweets, model): """ cleans tweets by removing unwanted symbols and text into a workable format :param tweets: tweet data :type tweets: numpy array of strings """ # dataset should be a pandas dataframe dimension = 300 data_array = np.empty(shape=[0, dimension]) indexes = [] for i, tweet in enumerate(tweets): words = tweet.split() if len(words) !=0: feature = 0 for word in words: try: feature += model[word] except: pass feature /= len(words) try: if feature.size == dimension: data_array = np.append(data_array, [feature], axis=0) indexes.append(i) except: continue indexes = np.asarray(indexes) assert indexes.size == data_array.shape[0] return data_array, indexes def remove_indices(weak_signals): """ remove indexes of 
weak_signals that do not have coverage :param weak_signals: list of weak signals for all examples :type weak_signals: numpy array of strings """ indices = np.where(np.sum(weak_signals, axis=1) == -1*weak_signals.shape[1])[0] weak_signals = np.delete(weak_signals, indices, axis=0) return weak_signals, indices def keyword_labeling(data, keywords, sentiment='pos'): """ finds data points that belong to either to pos or negative classes :param data: text data to look through :type data: np array of strings :param keywords: keywords to detect if a word belongs to a class :type keywords: lists of lists of strings to detect if a word belongs to a class :param sentiment: positive or negative class :type sentiment: defualt positive class unless otherwise specified """ mask = 1 if sentiment == 'pos' else 0 weak_signals = [] for terms in keywords: weak_signal = [] for text in data: label=-1 for word in terms: if word in text.lower(): label = mask weak_signal.append(label) weak_signals.append(weak_signal) return np.asarray(weak_signals).T POSITIVE_LABELS = [['good','great','nice','delight','wonderful'], ['love', 'best', 'genuine','well', 'thriller'], ['clever','enjoy','fine','deliver','fascinating'], ['super','excellent','charming','pleasure','strong'], ['fresh','comedy', 'interesting','fun','entertain', 'charm', 'clever'], ['amazing','romantic','intelligent','classic','stunning'], ['rich','compelling','delicious', 'intriguing','smart']] NEGATIVE_LABELS = [['bad','better','leave','never','disaster'], ['nothing','action','fail','suck','difficult'], ['mess','dull','dumb', 'bland','outrageous'], ['slow', 'terrible', 'boring', 'insult','weird','damn'], ['drag','awful','waste', 'flat','worse'], #['drag','no','not','awful','waste', 'flat'], ['horrible','ridiculous','stupid', 'annoying','painful'], ['poor','pathetic','pointless','offensive','silly']] # Bellow two functions take from ./cll/model_utilites.py def calculate_bounds(true_labels, predicted_labels, mask=None): """ Calculate error rate on data points the weak signals label """ if len(true_labels.shape) == 1: predicted_labels = predicted_labels.ravel() assert predicted_labels.shape == true_labels.shape if mask is None: mask = np.ones(predicted_labels.shape) if len(true_labels.shape) == 1: mask = mask.ravel() error_rate = true_labels*(1-predicted_labels) + \ predicted_labels*(1-true_labels) with np.errstate(divide='ignore', invalid='ignore'): error_rate = np.sum(error_rate*mask, axis=0) / np.sum(mask, axis=0) error_rate = np.nan_to_num(error_rate) # check results are scalars if np.isscalar(error_rate): error_rate = np.asarray([error_rate]) return error_rate def get_error_bounds(true_labels, weak_signals): """ Get error bounds of the weaks signals returns a list of size num_weak x num_classes """ error_bounds = [] mask = weak_signals >= 0 for i, weak_probs in enumerate(weak_signals): active_mask = mask[i] error_rate = calculate_bounds(true_labels, weak_probs, active_mask) error_bounds.append(error_rate) return error_bounds # # # # # # # # # # # # # SST-2 Dataset # # # # # # # # # # # # def SST_2_generator(): """ breaks down data from SST dataset """ # get data from files datapath = './sst-2/' train_data = pd.read_csv(datapath+'sst2-train.csv') test_data = pd.read_csv(datapath+'sst2-test.csv') train_data.head() # get labes from data train_labels = train_data.label.values test_labels = test_data.label.values # remove labels from data train_data = cleanTweets(train_data.drop(columns=['label'])) test_data = cleanTweets(test_data.drop(columns=['label'])) 
NEGATIVE_LABELS = [['bad','better','leave','never','disaster'], ['nothing','action','fail','suck','difficult'], ['mess','dull','dumb', 'bland','outrageous'], ['slow', 'terrible', 'boring', 'insult','weird','damn'], # ['drag','awful','waste', 'flat','worse'], ['drag','no','not','awful','waste', 'flat'], ['horrible','ridiculous','stupid', 'annoying','painful'], ['poor','pathetic','pointless','offensive','silly']] # get weak signals positive_labels = keyword_labeling(train_data.sentence.values, POSITIVE_LABELS) negative_labels = keyword_labeling(train_data.sentence.values, NEGATIVE_LABELS, sentiment='neg') weak_signals = np.hstack([positive_labels, negative_labels]) weak_signals.shape # Clean data and reset index train_data.reset_index(drop=True, inplace=True) # convert dataframe to nparrays train_data = train_data.values test_data = test_data.values print(train_data.shape, train_labels.shape) print(test_data.shape, test_labels.shape) # remove data points no covered by weak signals weak_signals, indices = remove_indices(weak_signals) weak_signals.shape train_data = np.delete(train_data, indices, axis=0) train_labels = np.delete(train_labels, indices) # indices = indices # # # # # # # # # # # Fix Code later # # # # # # # # # # # # n,m = weak_signals.shape # weak_signal_probabilities = weak_signals.T.reshape(m,n,1) # weak_signals_mask = weak_signal_probabilities >=0 # true_error_rates = get_error_bounds(train_labels, weak_signal_probabilities, weak_signals_mask) # print("error: ", np.asarray(true_error_rates)) # Convert data from np arrays of np arrays to np arrays of strings train_data = train_data.flatten() test_data = test_data.flatten() print(train_data.shape, train_labels.shape) print(test_data.shape, test_labels.shape) train_features, train_index = get_text_vectors(train_data, glove_model) test_features, test_index = get_text_vectors(test_data, glove_model) # save sst-2 data np.save(datapath+'data_features.npy', train_features) np.save(datapath+'test_features.npy', test_features) # save sst-2 labels np.save(datapath+'data_labels.npy', train_labels) np.save(datapath+'test_labels.npy', test_labels) # save the one-hot signals np.save(datapath+'weak_signals.npy', weak_signals) # # # # # # # # # # IMDB Dataset # # # # # # # # # # def IMDB_generator(): """ breaks down data from IMDB dataset """ datapath = './imdb/' df = pd.read_csv(datapath+'IMDB Dataset.csv') # apply on train data cleaned_data = cleanTweets(df.drop(columns=['sentiment'])) indexes = cleaned_data.index.values df.shape, indexes.size n = indexes.size # get test data np.random.seed(50) test_indexes = np.random.choice(indexes, int(n*0.2), replace=False) test_labels = np.zeros(test_indexes.size) test_labels[df.sentiment.values[test_indexes]=='positive'] = 1 test_data = df.review.values[test_indexes] train_indexes = np.delete(indexes, [np.where(indexes == i)[0][0] for i in test_indexes]) train_labels = np.zeros(train_indexes.size) train_labels[df.sentiment.values[train_indexes]=='positive'] = 1 train_data = df.review.values[train_indexes] print(train_data.shape, train_labels.shape) print(test_data.shape, test_labels.shape) positive_labels = keyword_labeling(train_data, [['good'],['wonderful'],['great'],['amazing'],['excellent']], sentiment='pos') negative_labels = keyword_labeling(train_data, [['bad'],['horrible'],['sucks'],['awful'],['terrible']], sentiment='neg') weak_signals = np.hstack([positive_labels, negative_labels]) weak_signals, indices = remove_indices(weak_signals) weak_signals.shape # add signals not covered to test data 
test_data = np.append(test_data, train_data[indices]) test_labels = np.append(test_labels, train_labels[indices]) # delete train data not covered by weak signals train_data = np.delete(train_data, indices, axis=0) train_labels = np.delete(train_labels, indices) # get data features train_features, train_index = get_text_vectors(train_data, glove_model) test_features, test_index = get_text_vectors(test_data, glove_model) print(train_index.size, train_data.shape[0]) test_index.size, test_labels.size # save imdb data np.save(datapath+'data_features.npy', train_features) np.save(datapath+'test_features.npy', test_features) # save imdb labels np.save(datapath+'data_labels.npy', train_labels[train_index]) np.save(datapath+'test_labels.npy', test_labels[test_index]) # save the weak_signals np.save(datapath+'weak_signals.npy', weak_signals[train_index]) print("\n\n working on SST_2 \n\n" ) SST_2_generator() print("\n\n working on IMDB \n\n" ) IMDB_generator()
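# --- Editor's illustrative sketch (not part of the original script) ---
# keyword_labeling() and remove_indices() are defined elsewhere in this project;
# the hypothetical stand-ins below only illustrate the weak-supervision scheme the
# code above appears to assume: one signal per keyword group, 1 for a positive hit,
# 0 for a negative hit, -1 when the group abstains, and removal of rows that no
# signal covers.
import numpy as np

def keyword_labeling_sketch(sentences, keyword_groups, sentiment='pos'):
    """Return an (n_sentences, n_groups) array of weak labels (-1 = abstain)."""
    fired_label = 1 if sentiment == 'pos' else 0
    signals = np.full((len(sentences), len(keyword_groups)), -1.0)
    for j, group in enumerate(keyword_groups):
        for i, sentence in enumerate(sentences):
            if any(word in sentence.lower().split() for word in group):
                signals[i, j] = fired_label
    return signals

def remove_indices_sketch(weak_signals):
    """Drop rows where every signal abstains; return kept signals and dropped row indices."""
    uncovered = np.where((weak_signals == -1).all(axis=1))[0]
    return np.delete(weak_signals, uncovered, axis=0), uncovered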
PypiClean
/MeepMeep-0.6.1.tar.gz/MeepMeep-0.6.1/meepmeep/knot2d.py
from numpy import asarray, zeros, arctan2

from .utils import as_from_rhop, i_from_baew
from .xy.position import pd_t15, solve_xy_p5s, xy_t15
from .xy.derivatives import pd_with_derivatives_v, xy_derivative_coeffs
from .xy.par_direct import diffs as diffs_natural
from .xy.par_fitting import partial_derivatives as diffs_fitting


class Knot2D:
    def __init__(self, phase: float, t0: float, p: float, a: float, i: float, e: float, w: float,
                 derivatives: bool = False):
        self.derivatives = derivatives
        self.phase = phase
        self.t0 = t0
        self.p = p
        self.a = a
        self.i = i
        self.e = e
        self.w = w
        self._coeffs = solve_xy_p5s(phase, p, a, i, e, w)
        if derivatives:
            self._c_derivative_coeffs()

    def _c_derivative_coeffs(self):
        d = diffs_natural(self.p, self.a, self.i, self.e, self.w, 1e-4)
        self._coeffs_d = xy_derivative_coeffs(d, 1e-4, self._coeffs)

    def position(self, t):
        return xy_t15(t, self.t0, self.p, self._coeffs)

    def projected_distance(self, t):
        if self.derivatives:
            return pd_with_derivatives_v(t, self.t0, self.p, self._coeffs, self._coeffs_d)
        else:
            return pd_t15(t, self.t0, self.p, self._coeffs)

    def _pd_numerical_derivatives(self, t, e=1e-4):
        t = asarray(t)
        res = zeros((6, t.size))
        r0 = pd_t15(t, self.t0, self.p, solve_xy_p5s(self.phase, self.p, self.a, self.i, self.e, self.w))
        res[0] = pd_t15(t, self.t0, self.p, solve_xy_p5s(self.phase + e, self.p, self.a, self.i, self.e, self.w))
        res[1] = pd_t15(t, self.t0, self.p + e, solve_xy_p5s(self.phase, self.p + e, self.a, self.i, self.e, self.w))
        res[2] = pd_t15(t, self.t0, self.p, solve_xy_p5s(self.phase, self.p, self.a + e, self.i, self.e, self.w))
        res[3] = pd_t15(t, self.t0, self.p, solve_xy_p5s(self.phase, self.p, self.a, self.i + e, self.e, self.w))
        res[4] = pd_t15(t, self.t0, self.p, solve_xy_p5s(self.phase, self.p, self.a, self.i, self.e + e, self.w))
        res[5] = pd_t15(t, self.t0, self.p, solve_xy_p5s(self.phase, self.p, self.a, self.i, self.e, self.w + e))
        res = (res - r0) / e
        return res


class Knot2DFit(Knot2D):
    def __init__(self, phase: float, t0: float, p: float, rho: float, b: float, secw: float, sesw: float,
                 derivatives: bool = False):
        self.rho = rho
        self.b = b
        self.secw = secw
        self.sesw = sesw
        a = as_from_rhop(rho, p)
        e = secw ** 2 + sesw ** 2
        w = arctan2(sesw, secw)
        i = i_from_baew(b, a, e, w)
        super().__init__(phase, t0, p, a, i, e, w, derivatives)

    def _c_derivative_coeffs(self):
        d = diffs_fitting(self.p, self.rho, self.b, self.secw, self.sesw, 1e-4)
        self._coeffs_d = xy_derivative_coeffs(d, 1e-4, self._coeffs)
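# --- Editor's illustrative usage sketch (not part of the original module) ---
# The parameter values are assumptions chosen only to show the call pattern of
# the class above (orbital period in days, scaled semi-major axis, inclination
# in radians assumed); they are not taken from the package's tests or docs.
if __name__ == "__main__":
    from numpy import linspace, pi

    orbit = Knot2D(phase=0.0, t0=0.0, p=2.2, a=8.0, i=0.49 * pi, e=0.0, w=0.0)
    times = linspace(-0.1, 0.1, 200)
    z = orbit.projected_distance(times)   # projected star-planet separation over the window
    xy = orbit.position(0.0)              # sky-plane position at a single epoch
    print("minimum separation:", min(z))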
PypiClean
/CL_Write_Results_Library-1.5.2-py3-none-any.whl/CL_Write_Results_Library/CL_Write_Results_Library.py
import os import openpyxl from openpyxl.styles import Alignment, Font from openpyxl.cell import Cell from robot.libraries.BuiltIn import BuiltIn class CL_Write_Results_Library: ROBOT_LISTENER_API_VERSION = 2 def __init__(self, filepath, sheetname, agent_type): """ This Class is to be run as a listener with the following command in the run configuration: --listener CPL_Write_Results_Library;PycharmProjects\Partners_International\Data\P@I_Traceability_Matrix.xlsx;req_traceability_matrix;local OR --listener CPL_Write_Results_Library;PycharmProjects\Partners_International\Data\P@I_Traceability_Matrix.xlsx;req_traceability_matrix;serveragent Passes in parameters path and filename of the Azure DevOps project requirement traceability matrix excel file (parameter: filepath), the requirement traceability matrix worksheet name (parameter: sheetname), and the agent type (paramerter: agent_type, valid values: local or serveragent). Use agent_type = local, if the automation scripts are to be run from a local Azure DevOps build agent or from Jenkins. Use agent_type = serveragent, if the automation scripts are to be run from an Azure DevOps build agent created on the server """ file = filepath if agent_type != 'serveragent': username = os.environ['USERNAME'] print(username) file = "c:\\users\\" + username + "\\" + filepath self.filename = file self.sheetname = sheetname def end_test(self, name, attrs): """ When used as listener, this function is triggered when the test ends and does not require inputted parameters. This functional formats the requirement traceability matrix worksheet excel file and writes the following information to the requirement traceability matrix worksheet excel file: 1. test case status: PASS or FAIL 2. the error message for failed test cases 3. the time it took to run each test case, in milliseconds. """ wb = openpyxl.load_workbook(self.filename) ws = wb.get_sheet_by_name(self.sheetname) ws.column_dimensions['A'].width = 15 ws.column_dimensions['B'].width = 15 ws.column_dimensions['C'].width = 15 ws.column_dimensions['D'].width = 20 ws.column_dimensions['E'].width = 20 ws.column_dimensions['F'].width = 75 ws.column_dimensions['G'].width = 75 ws.column_dimensions['H'].width = 25 ws.column_dimensions['I'].width = 70 ws.column_dimensions['J'].width = 125 ws.column_dimensions['K'].width = 75 ws.column_dimensions['L'].width = 20 ws.column_dimensions['M'].width = 20 ws.column_dimensions['N'].width = 20 for cell in ws["1:1"]: cell.font = Font(bold=True) for row in ws.iter_rows(): for cell in row: cell.alignment = Alignment(wrap_text=True,vertical = 'top') rowEndIndex = ws.max_row + 1 for x in range(2, rowEndIndex): if ws.cell(x, 7).value == name: ws.cell(x, 8).value = attrs['status'] ws.cell(x, 9).value = attrs['message'] ws.cell(x, 12).value = attrs['elapsedtime'] break # exit for loop wb.save(self.filename) # Save the workbook
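# --- Editor's illustrative sketch (not part of the original library) ---
# Shows the attrs dictionary shape end_test() consumes when Robot Framework
# (listener API v2) reports a finished test. The file path, sheet name and
# test name are placeholders, and the workbook must already exist with the
# test name in column G of the given worksheet.
if __name__ == "__main__":
    listener = CL_Write_Results_Library(
        filepath=r"PycharmProjects\Demo\Data\Traceability_Matrix.xlsx",  # placeholder
        sheetname="req_traceability_matrix",
        agent_type="serveragent",  # skips the USERNAME-based path rewrite
    )
    listener.end_test("TC_Login_01", {
        "status": "PASS",
        "message": "",
        "elapsedtime": 4321,  # milliseconds
    })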
PypiClean
/Ludo_the_game-0.0.1-py3-none-any.whl/ludo_pkg/ex43.py
from sys import exit from random import randint from textwrap import dedent class Scene(object): def enter(self): print("This scene is not yet configured.") print("Subclass it and implement enter().") exit(1) class Engine(object): def __init__(self, scene_map): self.scene_map = scene_map def play(self): current_scene = self.scene_map.opening_scene() last_scene = self.scene_map.next_scene('finished') while current_scene != last_scene: next_scene_name = current_scene.enter() current_scene = self.scene_map.next_scene(next_scene_name) # be sure to print out the last scene current_scene.enter() class Death(Scene): quips = [ "You died. You kinda suck at this.", "your Mom would be proud... if she were smarter.", "Such a luser.", "I have a small puppy that's better at this.", "you are worse than your Dad's jokes." ] def enter(self): print(Death.quips[randint(0, len(self.quips) - 1)]) exit(1) class CentralCorridor(Scene): def enter(self): print(dedent(""" The gothons of Planet Percel # 25 have invaded destroyed your entire crew . you are the last member and your last mission is to get the neu bomb from the Weapons Armory, put it in the br blow the ship up after getting into an escape you're running down the central corridor to the Armory when Gothon jumps out, red scaly skin teeth, and evil clown flowing around filled body. He's blocking the door to the Arm about to pull a weapon to blast you.""")) action = input("> ") if action == "Shoot!": print(dedent(""" Quick on the draw you yank out your blaster it at the Gothon. His clown constume is flow moving around his body, which throws off your laser hits his costume ruins his brand new costume bought him, which makes him fly into an in and blast you repeatedly in the face until dead.The he eats you.""")) return 'death' elif action == "dodge!": print(dedent(""" Like a world class boxer you dodge, weave, slide right as the Gothon's blaster cranks past your head, In the middle of your artr you foot slips and you bangs your head on wall pass out. you wake up shortly after die as the Gothon stomps on your head and""")) return "death" elif action == "tell a joke": print(dedent(""" Lucky for you they made you learn Gothon in the academy. you tell the one Gothon joke lbhezbgure vf fb sng, jura fvgf nebha fur not to laugh, then busts out laughing and while he's laughing you run up and shoot the head putting him down, then jump through the Weapon armory door.""")) return "laser_weapon_armory" else: print("DOES NOT COMPUTE!") return 'central_corridor' class laserWeaponArmory(Scene): def enter(self): print(dedent(""" you do a dive roll into the Weapon Armorym, created the room for more Gothons that might be hiding quite. you stand up and run to the room and find the neutron bomb in its con There;s a keypad lock on the box and you need get the bomb out. if you get code wrong 1 the lock close forever and you can't get the code is 3 digits.""")) code = f"{randint(1,9)}{randint(1,9)}{randint(1,9)}" guess = input("[keypad]> ") guesses = 0 while guess != code and guesses < 10: print("BZZZEDDD!") guesses += 1 guess = input("[keypad]> ") if guess == code: print(dedent(""" The container clicks opne and the seal breaks gas out. you grab the newtron bomb and run you can to the bridge where you must place right spot.""")) return 'the_bridge' else: print(dedent(""" The lock buzzess one last time and you sickening melting sound as the mechanism together. 
you decide to sit there, and find gothons blow up the ship form their ship""")) return 'death' class TheBridge(Scene): def enter(self): print(dedent(""" You burst onto the Bridge with the netron des under your arm and surprise 5 Gothons who are take control of their ship. Each of them has an aclwn costume than the last . They haven't pul weapons out yet, as they see the activate bomb arm and don'twant to set it off""")) action = input(">") if action == "throw the bomb": print(dedent("""In a panic you throw the bomb at the ground and make a leap for the door.Right as you Gothon shoots you right in the back killi you die you see another Gothon franticallicsarm the bomb. you die knowing they will blow up it goes off.""" )) return 'death' elif action == "slowly place the bomb": print(dedent(""" You point your blaster at the bomb under the Gothons put their hands up and start you inch backward to the door, open it, a carefully place the bomb on the floor, po blaster at it.you then jump back through punch the close button and blast the lock Gothons can't get out. Now that the bomb you run to the escape pod to get off""")) return 'escape_pod' else: print("DOES NOT COMPUTE!") return "the_bridge" class EscapePod(Scene): def enter(self): print(dedent(""" You rush through the ship desperately trying the escape pod before the whole ship explodes like hardly any Gothons on the ship, so you clear of interference. You get ot the chamber escape Pods, and now need to pick one to take them could be damaged but you don't have time There's 5 pods which one do you take?""")) good_pod = randint(1, 5) guess = input("[pod #] >") if int(guess) != good_pod: print(dedent(""" you jump into pod {guess} and hit eject the pod escapes out inot the void of space implodes as the hull ruptures, crushing you like jam jelly.""")) return 'death' else: print(dedent(""" You jump into pod {guess} and hit the eject the pod easily slides out inot space head planet below.As it flies to the planet, bakc an see your ship implode them explode bright star, taking out the Gothon ship a time. you won!""")) return 'finished' class Finished(Scene): def enter(self): print("you won! Good job.") return 'finished' class Map(object): scenes = { 'central_corridor': CentralCorridor() , 'laser_weapon_armory' : laserWeaponArmory(), 'the_bridge' : TheBridge(), 'escape_pod' : EscapePod(), 'death' : Death(), 'finished' : Finished() } def __init__(self, start_scene): self.start_scene = start_scene def next_scene(self, scene_name): val = Map.scenes.get(scene_name) return val def opening_scene(self): return self.next_scene(self.start_scene) a_map = Map('central_corridor') a_game = Engine(a_map) a_game.play()
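# --- Editor's illustrative sketch (not part of the original game) ---
# Shows how another room could be added with the Scene/Map/Engine pattern
# defined above; the scene name and text are made up. Note the original
# module already starts the game at import time, so this is structural
# illustration only.
class BrokenAirlock(Scene):

    def enter(self):
        print(dedent("""
            A jammed airlock blocks your way back to the central corridor.
            Type 'force it' to pry it open."""))

        action = input("> ")

        if action == "force it":
            return 'central_corridor'
        else:
            return 'death'

# Registering the extra room would only require adding it to Map.scenes:
# Map.scenes['broken_airlock'] = BrokenAirlock()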
PypiClean
/ModbusGuiApp-1.1-py3-none-any.whl/ModbusGuiApp-1.1.dist-info/license.md
MIT License

Copyright (c) [2020] [Ivan]

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
PypiClean
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-3.1.2/SCons/dblite.py
from __future__ import print_function import os import pickle import shutil import time from SCons.compat import PICKLE_PROTOCOL keep_all_files = 00000 ignore_corrupt_dbfiles = 0 def corruption_warning(filename): print("Warning: Discarding corrupt database:", filename) try: unicode except NameError: def is_string(s): return isinstance(s, str) else: def is_string(s): return type(s) in (str, unicode) def is_bytes(s): return isinstance(s, bytes) try: unicode('a') except NameError: def unicode(s): return s dblite_suffix = '.dblite' # TODO: Does commenting this out break switching from py2/3? # if bytes is not str: # dblite_suffix += '.p3' tmp_suffix = '.tmp' class dblite(object): """ Squirrel away references to the functions in various modules that we'll use when our __del__() method calls our sync() method during shutdown. We might get destroyed when Python is in the midst of tearing down the different modules we import in an essentially arbitrary order, and some of the various modules's global attributes may already be wiped out from under us. See the discussion at: http://mail.python.org/pipermail/python-bugs-list/2003-March/016877.html """ _open = open _pickle_dump = staticmethod(pickle.dump) _pickle_protocol = PICKLE_PROTOCOL _os_chmod = os.chmod try: _os_chown = os.chown except AttributeError: _os_chown = None _os_rename = os.rename _os_unlink = os.unlink _shutil_copyfile = shutil.copyfile _time_time = time.time def __init__(self, file_base_name, flag, mode): assert flag in (None, "r", "w", "c", "n") if flag is None: flag = "r" base, ext = os.path.splitext(file_base_name) if ext == dblite_suffix: # There's already a suffix on the file name, don't add one. self._file_name = file_base_name self._tmp_name = base + tmp_suffix else: self._file_name = file_base_name + dblite_suffix self._tmp_name = file_base_name + tmp_suffix self._flag = flag self._mode = mode self._dict = {} self._needs_sync = 00000 if self._os_chown is not None and (os.geteuid() == 0 or os.getuid() == 0): # running as root; chown back to current owner/group when done try: statinfo = os.stat(self._file_name) self._chown_to = statinfo.st_uid self._chgrp_to = statinfo.st_gid except OSError as e: # db file doesn't exist yet. # Check os.environ for SUDO_UID, use if set self._chown_to = int(os.environ.get('SUDO_UID', -1)) self._chgrp_to = int(os.environ.get('SUDO_GID', -1)) else: self._chown_to = -1 # don't chown self._chgrp_to = -1 # don't chgrp if self._flag == "n": with self._open(self._file_name, "wb", self._mode): pass # just make sure it exists else: try: f = self._open(self._file_name, "rb") except IOError as e: if self._flag != "c": raise e with self._open(self._file_name, "wb", self._mode): pass # just make sure it exists else: p = f.read() f.close() if len(p) > 0: try: if bytes is not str: self._dict = pickle.loads(p, encoding='bytes') else: self._dict = pickle.loads(p) except (pickle.UnpicklingError, EOFError, KeyError): # Note how we catch KeyErrors too here, which might happen # when we don't have cPickle available (default pickle # throws it). if (ignore_corrupt_dbfiles == 0): raise if (ignore_corrupt_dbfiles == 1): corruption_warning(self._file_name) def close(self): if self._needs_sync: self.sync() def __del__(self): self.close() def sync(self): self._check_writable() f = self._open(self._tmp_name, "wb", self._mode) self._pickle_dump(self._dict, f, self._pickle_protocol) f.close() # Windows doesn't allow renaming if the file exists, so unlink # it first, chmod'ing it to make sure we can do so. 
On UNIX, we # may not be able to chmod the file if it's owned by someone else # (e.g. from a previous run as root). We should still be able to # unlink() the file if the directory's writable, though, so ignore # any OSError exception thrown by the chmod() call. try: self._os_chmod(self._file_name, 0o777) except OSError: pass self._os_unlink(self._file_name) self._os_rename(self._tmp_name, self._file_name) if self._os_chown is not None and self._chown_to > 0: # don't chown to root or -1 try: self._os_chown(self._file_name, self._chown_to, self._chgrp_to) except OSError: pass self._needs_sync = 00000 if (keep_all_files): self._shutil_copyfile( self._file_name, self._file_name + "_" + str(int(self._time_time()))) def _check_writable(self): if (self._flag == "r"): raise IOError("Read-only database: %s" % self._file_name) def __getitem__(self, key): return self._dict[key] def __setitem__(self, key, value): self._check_writable() if (not is_string(key)): raise TypeError("key `%s' must be a string but is %s" % (key, type(key))) if (not is_bytes(value)): raise TypeError("value `%s' must be a bytes but is %s" % (value, type(value))) self._dict[key] = value self._needs_sync = 0o001 def keys(self): return list(self._dict.keys()) def has_key(self, key): return key in self._dict def __contains__(self, key): return key in self._dict def iterkeys(self): # Wrapping name in () prevents fixer from "fixing" this return (self._dict.iterkeys)() __iter__ = iterkeys def __len__(self): return len(self._dict) def open(file, flag=None, mode=0o666): return dblite(file, flag, mode) def _exercise(): db = open("tmp", "n") assert len(db) == 0 db["foo"] = "bar" assert db["foo"] == "bar" db[unicode("ufoo")] = unicode("ubar") assert db[unicode("ufoo")] == unicode("ubar") db.sync() db = open("tmp", "c") assert len(db) == 2, len(db) assert db["foo"] == "bar" db["bar"] = "foo" assert db["bar"] == "foo" db[unicode("ubar")] = unicode("ufoo") assert db[unicode("ubar")] == unicode("ufoo") db.sync() db = open("tmp", "r") assert len(db) == 4, len(db) assert db["foo"] == "bar" assert db["bar"] == "foo" assert db[unicode("ufoo")] == unicode("ubar") assert db[unicode("ubar")] == unicode("ufoo") try: db.sync() except IOError as e: assert str(e) == "Read-only database: tmp.dblite" else: raise RuntimeError("IOError expected.") db = open("tmp", "w") assert len(db) == 4 db["ping"] = "pong" db.sync() try: db[(1, 2)] = "tuple" except TypeError as e: assert str(e) == "key `(1, 2)' must be a string but is <type 'tuple'>", str(e) else: raise RuntimeError("TypeError exception expected") try: db["list"] = [1, 2] except TypeError as e: assert str(e) == "value `[1, 2]' must be a string but is <type 'list'>", str(e) else: raise RuntimeError("TypeError exception expected") db = open("tmp", "r") assert len(db) == 5 db = open("tmp", "n") assert len(db) == 0 dblite._open("tmp.dblite", "w") db = open("tmp", "r") dblite._open("tmp.dblite", "w").write("x") try: db = open("tmp", "r") except pickle.UnpicklingError: pass else: raise RuntimeError("pickle exception expected.") global ignore_corrupt_dbfiles ignore_corrupt_dbfiles = 2 db = open("tmp", "r") assert len(db) == 0 os.unlink("tmp.dblite") try: db = open("tmp", "w") except IOError as e: assert str(e) == "[Errno 2] No such file or directory: 'tmp.dblite'", str(e) else: raise RuntimeError("IOError expected.") if (__name__ == "__main__"): _exercise() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
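# --- Editor's illustrative usage sketch (not part of the original module) ---
# _exercise() above is the module's own test; this shorter sketch only shows
# the dict-like API: string keys, bytes values, explicit sync(). Note that
# open() here is the module-level dblite.open(), not the builtin.
if (__name__ == "__main__"):
    db = open("example_db", "c")        # creates example_db.dblite if missing
    db["node_signature"] = b"content-hash"
    db.sync()                           # pickle the dict to disk via a tmp file + rename
    print("node_signature" in db, len(db))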
PypiClean
/MCRAMP-0.0.3-py3-none-any.whl/mcramp/geom/cylinder_ext.py
from .gprim import GPrim  # pylint: disable=E0401

import numpy as np
import pyopencl as cl
import pyopencl.array as clarr

import os
import re


class GCylinderExt(GPrim):
    """
    Geometry kernel for cylinder geometry. Intersects with the exterior of the
    cylinder, i.e. first intersection must be at positive time for scattering
    to occur.

    ...

    Parameters
    ----------
    radius : float
        The radius of the cylinder
    height : float
        The height of the cylinder

    Notes
    -----
    Intersection 1 : Negative time intersection with the portion of the banana behind the\
        neutron trajectory.
    Intersection 2 : Positive time intersection with the portion of the banana ahead of the\
        neutron trajectory.

    Methods
    -------
    None
    """

    def __init__(self, radius=0, height=0, idx=0, ctx=None):
        self.radius = np.float32(radius)
        self.height = np.float32(height)
        self.idx = idx

        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cylinder_ext.cl'), mode='r') as f:
            self.prg = cl.Program(ctx, f.read()).build(
                options=r'-I "{}/include"'.format(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

    def intersect_prg(self, queue, N, neutron_buf, intersection_buf, iidx_buf):
        self.prg.intersect(queue, (N, ), None,
                           neutron_buf,
                           intersection_buf,
                           iidx_buf,
                           np.uint32(self.idx),
                           self.radius,
                           self.height)

    def lines(self):
        angles = np.linspace(0, np.pi)
        h_2 = self.height / 2.0

        x = []
        y = []
        z = []

        for ang in angles:
            x.append(self.radius*np.sin(ang))
            y.append(h_2)
            z.append(self.radius*np.cos(ang))

        for ang in reversed(angles):
            x.append(self.radius*np.sin(ang))
            y.append(-h_2)
            z.append(self.radius*np.cos(ang))

        x.append(x[0])
        y.append(y[0])
        z.append(z[0])

        angles = np.linspace(0, -np.pi)

        for ang in angles:
            x.append(self.radius*np.sin(ang))
            y.append(h_2)
            z.append(self.radius*np.cos(ang))

        for ang in reversed(angles):
            x.append(self.radius*np.sin(ang))
            y.append(-h_2)
            z.append(self.radius*np.cos(ang))

        x.append(x[0])
        y.append(y[0])
        z.append(z[0])

        return [x, y, z]
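# --- Editor's illustrative usage sketch (not part of the original module) ---
# Constructing the kernel needs a working PyOpenCL context and the
# cylinder_ext.cl source shipped next to this file; the radius/height values
# are placeholders. lines() is pure NumPy and returns outline coordinates for
# plotting the cylinder.
if __name__ == "__main__":
    ctx = cl.create_some_context()      # pick a default/interactive OpenCL device
    cyl = GCylinderExt(radius=0.05, height=0.2, idx=0, ctx=ctx)
    x, y, z = cyl.lines()               # polyline of the cylinder outline
    print(len(x), "outline points")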
PypiClean
/IsoCon-0.3.3.tar.gz/IsoCon-0.3.3/modules/end_invariant_functions.py
from __future__ import print_function import os import sys import argparse import re # import numpy as np import signal from multiprocessing import Pool import multiprocessing as mp import math from time import time import copy import networkx as nx import edlib from modules import functions from modules.input_output import write_output from modules.SW_alignment_module import parasail_alignment def parasail_traceback_allow_ends(x, y, end_threshold = 0): (s1, s2, (s1_alignment, s2_alignment, (matches, mismatches, indels)) ) = parasail_alignment(x, y, 0, 0, opening_penalty = 3, mismatch_penalty = -3) ed = mismatches + indels p = "[-]+" m_start1 = re.match(p,s1_alignment) m_start2 = re.match(p,s2_alignment) if m_start1: cut_start_diff1 = len(m_start1.group(0)) ed -= min(cut_start_diff1, end_threshold) elif m_start2: cut_start_diff2 = len(m_start2.group(0)) ed -= min(cut_start_diff2, end_threshold) m_end1 = re.match(p,s1_alignment[::-1]) m_end2 = re.match(p,s2_alignment[::-1]) if m_end1: cut_end_diff1 = len(m_end1.group(0)) ed -= min(cut_end_diff1, end_threshold) elif m_end2: cut_end_diff2 = len(m_end2.group(0)) ed -= min(cut_end_diff2, end_threshold) return ed def get_nearest_neighbors_parasail(batch_of_queries, global_index_in_matrix, start_index, seq_to_acc_list_sorted, neighbor_search_depth, ignore_ends_len): best_edit_distances = {} lower_target_edit_distances = {} # print("Processing global index:" , global_index_in_matrix) # error_types = {"D":0, "S": 0, "I": 0} for i in range(start_index, start_index + len(batch_of_queries)): if i % 500 == 0: print("processing ", i) seq1 = seq_to_acc_list_sorted[i][0] acc1 = seq_to_acc_list_sorted[i][1] best_edit_distances[acc1] = {} if acc1 in lower_target_edit_distances: best_ed = lower_target_edit_distances[acc1] # print("already_comp", best_ed ) else: best_ed = len(seq1) stop_up = False stop_down = False j = 1 while True: # for j in range(1,len(seq_to_acc_list_sorted)): if i - j < 0: stop_down = True if i + j >= len(seq_to_acc_list_sorted): stop_up = True if not stop_down: seq2 = seq_to_acc_list_sorted[i - j][0] acc2 = seq_to_acc_list_sorted[i - j][1] if math.fabs(len(seq1) - len(seq2)) > best_ed + 2*ignore_ends_len: stop_down = True if not stop_up: seq3 = seq_to_acc_list_sorted[i + j][0] acc3 = seq_to_acc_list_sorted[i + j][1] if math.fabs(len(seq1) - len(seq3)) > best_ed + 2*ignore_ends_len: stop_up = True if not stop_down: edit_distance = parasail_traceback_allow_ends(seq1, seq2, end_threshold = ignore_ends_len) if 0 < edit_distance < best_ed: best_ed = edit_distance best_edit_distances[acc1] = {} best_edit_distances[acc1][acc2] = edit_distance elif edit_distance == best_ed: best_edit_distances[acc1][acc2] = edit_distance if acc2 in lower_target_edit_distances: if 0 < edit_distance < lower_target_edit_distances[acc2]: lower_target_edit_distances[acc2] = edit_distance else: if 0 < edit_distance: lower_target_edit_distances[acc2] = edit_distance if not stop_up: edit_distance = parasail_traceback_allow_ends(seq1, seq3, end_threshold = ignore_ends_len) if 0 < edit_distance < best_ed: best_ed = edit_distance best_edit_distances[acc1] = {} best_edit_distances[acc1][acc3] = edit_distance elif edit_distance == best_ed: best_edit_distances[acc1][acc3] = edit_distance if acc3 in lower_target_edit_distances: if 0 < edit_distance < lower_target_edit_distances[acc3]: lower_target_edit_distances[acc3] = edit_distance else: if 0 < edit_distance: lower_target_edit_distances[acc3] = edit_distance if stop_down and stop_up: break if j >= neighbor_search_depth: 
break j += 1 # print(best_edit_distances[acc1]) # print("best ed:", best_ed) # if best_ed > 100: # print(best_ed, "for seq with length", len(seq1), seq1) return best_edit_distances def get_nearest_neighbors_under_ignored_edge_ends_parasail(seq_to_acc_list_sorted, params): if params.nr_cores == 1: best_edit_distances = get_nearest_neighbors_parasail(seq_to_acc_list_sorted, 0, 0, seq_to_acc_list_sorted, params.neighbor_search_depth, params.ignore_ends_len) # implement check here to se that all seqs got a nearest_neighbor, if not, print which noes that did not get a nearest_neighbor computed.! else: ####### parallelize alignment ######### # pool = Pool(processes=mp.cpu_count()) original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGINT, original_sigint_handler) pool = Pool(processes=mp.cpu_count()) # here we split the input into chunks chunk_size = max(int(len(seq_to_acc_list_sorted) / (10*mp.cpu_count())), 20 ) ref_seq_chunks = [ ( max(0, i - params.neighbor_search_depth -1), seq_to_acc_list_sorted[max(0, i - params.neighbor_search_depth -1) : i + chunk_size + params.neighbor_search_depth +1 ]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size) ] chunks = [(i, seq_to_acc_list_sorted[i:i + chunk_size]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size)] if params.verbose: write_output.logger(str([j for j, ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) write_output.logger("reference chunks:" + str([len(ch) for j,ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) # print([j for j, ch in ref_seq_chunks]) # print("reference chunks:", [len(ch) for j,ch in ref_seq_chunks]) write_output.logger(str([i for i,ch in chunks]), params.develop_logfile, timestamp=False) write_output.logger("query chunks:" + str([len(ch) for i,ch in chunks]), params.develop_logfile, timestamp=False) print([i for i,ch in chunks]) print("query chunks:", [len(ch) for i,ch in chunks]) # get nearest_neighbors takes thre sub containers: # chunk - a container with (sequences, accesions)-tuples to be aligned (queries) # ref_seq_chunks - a container with (sequences, accesions)-tuples to be aligned to (references) # already_converged_chunks - a set of query sequences that has already converged try: res = pool.map_async(get_nearest_neighbors_helper_parasail, [ ((chunks[i][1], chunks[i][0], chunks[i][0] - ref_seq_chunks[i][0], ref_seq_chunks[i][1], params.neighbor_search_depth, params.ignore_ends_len), {}) for i in range(len(chunks))] ) best_edit_distances_results =res.get(999999999) # Without the timeout this blocking call ignores all signals. 
except KeyboardInterrupt: print("Caught KeyboardInterrupt, terminating workers") pool.terminate() sys.exit() else: # print("Normal termination") pool.close() pool.join() best_edit_distances = {} for sub_graph in best_edit_distances_results: for seq in sub_graph: assert seq not in best_edit_distances best_edit_distances.update(sub_graph) return best_edit_distances def get_nearest_neighbors_helper_parasail(arguments): args, kwargs = arguments return get_nearest_neighbors_parasail(*args, **kwargs) def edlib_traceback_allow_ends(x, y, mode="NW", task="path", k=1, end_threshold = 0): result = edlib.align(x, y, mode=mode, task=task, k=k) ed = result["editDistance"] locations = result["locations"] cigar = result["cigar"] if cigar: tuples = [] result = re.split(r'[=DXSMI]+', cigar) i = 0 for length in result[:-1]: i += len(length) type_ = cigar[i] i += 1 tuples.append((length, type_ )) ed_ignore_ends = ed if tuples[0][1] == "D" or tuples[0][1] == "I": begin_snippet = int(tuples[0][0]) if begin_snippet <= end_threshold: ed_ignore_ends -= int(begin_snippet) if tuples[-1][1] == "D" or tuples[-1][1] == "I": end_snippet = int(tuples[-1][0]) if end_snippet <= end_threshold: ed_ignore_ends -= int(end_snippet) # if ed > ed_ignore_ends: # print("ed global:", ed, "ed after:", ed_ignore_ends) ed = ed_ignore_ends # if ed ==0: # print("here") return ed, locations, cigar def read_fasta(fasta_file): fasta_seqs = {} k = 0 temp = '' accession = '' for line in fasta_file: if line[0] == '>' and k == 0: accession = line[1:].strip() fasta_seqs[accession] = '' k += 1 elif line[0] == '>': yield accession, temp temp = '' accession = line[1:].strip() else: temp += line.strip() if accession: yield accession, temp def get_nearest_neighbors_helper(arguments): args, kwargs = arguments return get_nearest_neighbors(*args, **kwargs) def get_nearest_neighbors_under_ignored_edge_ends(seq_to_acc_list_sorted, params): if params.nr_cores == 1: best_edit_distances = get_nearest_neighbors(seq_to_acc_list_sorted, 0, 0, seq_to_acc_list_sorted, params.neighbor_search_depth, params.ignore_ends_len) # implement check here to se that all seqs got a nearest_neighbor, if not, print which noes that did not get a nearest_neighbor computed.! 
else: ####### parallelize alignment ######### # pool = Pool(processes=mp.cpu_count()) original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGINT, original_sigint_handler) pool = Pool(processes=mp.cpu_count()) # here we split the input into chunks chunk_size = max(int(len(seq_to_acc_list_sorted) / (10*mp.cpu_count())), 20 ) ref_seq_chunks = [ ( max(0, i - params.neighbor_search_depth -1), seq_to_acc_list_sorted[max(0, i - params.neighbor_search_depth -1) : i + chunk_size + params.neighbor_search_depth +1 ]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size) ] chunks = [(i, seq_to_acc_list_sorted[i:i + chunk_size]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size)] if params.verbose: write_output.logger(str([j for j, ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) write_output.logger("reference chunks:" + str([len(ch) for j,ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) # print([j for j, ch in ref_seq_chunks]) # print("reference chunks:", [len(ch) for j,ch in ref_seq_chunks]) write_output.logger(str([i for i,ch in chunks]), params.develop_logfile, timestamp=False) write_output.logger("query chunks:" + str([len(ch) for i,ch in chunks]), params.develop_logfile, timestamp=False) print([i for i,ch in chunks]) print("query chunks:", [len(ch) for i,ch in chunks]) # get nearest_neighbors takes thre sub containers: # chunk - a container with (sequences, accesions)-tuples to be aligned (queries) # ref_seq_chunks - a container with (sequences, accesions)-tuples to be aligned to (references) # already_converged_chunks - a set of query sequences that has already converged try: res = pool.map_async(get_nearest_neighbors_helper, [ ((chunks[i][1], chunks[i][0], chunks[i][0] - ref_seq_chunks[i][0], ref_seq_chunks[i][1], params.neighbor_search_depth, params.ignore_ends_len), {}) for i in range(len(chunks))] ) best_edit_distances_results =res.get(999999999) # Without the timeout this blocking call ignores all signals. 
except KeyboardInterrupt: print("Caught KeyboardInterrupt, terminating workers") pool.terminate() sys.exit() else: # print("Normal termination") pool.close() pool.join() best_edit_distances = {} for sub_graph in best_edit_distances_results: for seq in sub_graph: assert seq not in best_edit_distances best_edit_distances.update(sub_graph) return best_edit_distances def get_nearest_neighbors(batch_of_queries, global_index_in_matrix, start_index, seq_to_acc_list_sorted, neighbor_search_depth, ignore_ends_threshold): best_edit_distances = {} lower_target_edit_distances = {} # print("Processing global index:" , global_index_in_matrix) # error_types = {"D":0, "S": 0, "I": 0} for i in range(start_index, start_index + len(batch_of_queries)): if i % 500 == 0: print("processing ", i) seq1 = seq_to_acc_list_sorted[i][0] acc1 = seq_to_acc_list_sorted[i][1] best_edit_distances[acc1] = {} if acc1 in lower_target_edit_distances: best_ed = lower_target_edit_distances[acc1] # print("already_comp", best_ed ) else: best_ed = len(seq1) stop_up = False stop_down = False j = 1 while True: # for j in range(1,len(seq_to_acc_list_sorted)): if i - j < 0: stop_down = True if i + j >= len(seq_to_acc_list_sorted): stop_up = True if not stop_down: seq2 = seq_to_acc_list_sorted[i - j][0] acc2 = seq_to_acc_list_sorted[i - j][1] if math.fabs(len(seq1) - len(seq2)) > best_ed + 2*ignore_ends_threshold: stop_down = True if not stop_up: seq3 = seq_to_acc_list_sorted[i + j][0] acc3 = seq_to_acc_list_sorted[i + j][1] if math.fabs(len(seq1) - len(seq3)) > best_ed + 2*ignore_ends_threshold: stop_up = True if not stop_down: edit_distance_f, locations, cigar = edlib_traceback_allow_ends(seq1, seq2, mode="NW", task="path", k=best_ed+2*ignore_ends_threshold, end_threshold = ignore_ends_threshold) edit_distance_r, locations, cigar = edlib_traceback_allow_ends(seq1[::-1], seq2[::-1], mode="NW", task="path", k=best_ed+2*ignore_ends_threshold, end_threshold = ignore_ends_threshold) if edit_distance_f >= 0 and edit_distance_r >= 0: edit_distance = min(edit_distance_f, edit_distance_r) else: edit_distance = max(edit_distance_f, edit_distance_r) if 0 <= edit_distance < best_ed: best_ed = edit_distance best_edit_distances[acc1] = {} best_edit_distances[acc1][acc2] = edit_distance elif edit_distance == best_ed: best_edit_distances[acc1][acc2] = edit_distance if acc2 in lower_target_edit_distances: if 0 < edit_distance < lower_target_edit_distances[acc2]: lower_target_edit_distances[acc2] = edit_distance else: if 0 < edit_distance: lower_target_edit_distances[acc2] = edit_distance if not stop_up: edit_distance_f, locations, cigar = edlib_traceback_allow_ends(seq1, seq3, mode="NW", task="path", k=best_ed+2*ignore_ends_threshold, end_threshold = ignore_ends_threshold) edit_distance_r, locations, cigar = edlib_traceback_allow_ends(seq1[::-1], seq3[::-1], mode="NW", task="path", k=best_ed+2*ignore_ends_threshold, end_threshold = ignore_ends_threshold) if edit_distance_f >= 0 and edit_distance_r >= 0: edit_distance = min(edit_distance_f, edit_distance_r) else: edit_distance = max(edit_distance_f, edit_distance_r) if 0 <= edit_distance < best_ed: best_ed = edit_distance best_edit_distances[acc1] = {} best_edit_distances[acc1][acc3] = edit_distance elif edit_distance == best_ed: best_edit_distances[acc1][acc3] = edit_distance if acc3 in lower_target_edit_distances: if 0 < edit_distance < lower_target_edit_distances[acc3]: lower_target_edit_distances[acc3] = edit_distance else: if 0 < edit_distance: lower_target_edit_distances[acc3] = edit_distance if 
stop_down and stop_up: break if j >= neighbor_search_depth: break j += 1 # print(best_edit_distances[acc1]) # print("best ed:", best_ed) # if best_ed > 100: # print(best_ed, "for seq with length", len(seq1), seq1) return best_edit_distances def partition_highest_reachable_with_edge_degrees(G_star, params): # G_star, converged = graphs.construct_exact_nearest_neighbor_graph_improved(S, params) unique_start_strings = set(G_star.nodes()) # print("len G_star:", len(G_star)) partition_sizes = [] nr_consensus = 0 G_transpose = nx.reverse(G_star) # print("len G_star_transposed (nearest_neighbors):", len(G_transpose)) if params.verbose: print("Nodes in nearest_neighbor graph:", len(G_transpose)) print("Neighbors per nodes in nearest neighbor graph", sorted([len(list(G_transpose.neighbors(n)) ) for n in G_transpose], reverse=True)) all_weak_components = [ c for c in nx.weakly_connected_components(G_transpose)] M = {} partition = {} # print("here") for subgraph_set in sorted(all_weak_components, key=len, reverse=True): # print("Subgraph of size", len(subgraph.nodes()), "nr edges:", [x for x in subgraph.nodes()] ) while subgraph_set: reachable_comp_sizes = [] reachable_comp_weights = {} reachable_comp_nodes = [] direct_neighbors = {} processed = set() biggest_reachable_comp_size = 0 biggest_reachable_comp_weight = 0 biggest_reachable_comp_nodes = set() biggest_reachable_comp_nearest_neighbor = "XXXXX" for m in subgraph_set: if m in processed: continue reachable_comp = set([m]) reachable_comp_weight = G_transpose.node[m]["degree"] processed.add(m) #################################################### # take all reachable nodes #################################################### for n1,n2 in nx.dfs_edges(G_transpose, source=m): # store reachable node as processed here to avoid computation if n2 == m: continue processed.add(n2) reachable_comp.add(n2) reachable_comp_weight += G_transpose.node[n2]["degree"] #################################################### #################################################### # print("total component weight:", reachable_comp_weight) if biggest_reachable_comp_weight == 0: biggest_reachable_comp_weight = reachable_comp_weight biggest_reachable_comp_nodes = set(reachable_comp) biggest_reachable_comp_size = len(reachable_comp) biggest_reachable_comp_nearest_neighbor = m elif reachable_comp_weight >= biggest_reachable_comp_weight: if reachable_comp_weight > biggest_reachable_comp_weight: biggest_reachable_comp_weight = reachable_comp_weight biggest_reachable_comp_nodes = set(reachable_comp) biggest_reachable_comp_size = len(reachable_comp) biggest_reachable_comp_nearest_neighbor = m elif reachable_comp_weight == biggest_reachable_comp_weight: if biggest_reachable_comp_weight > 1: # print("tie both in weighted partition size and total edit distance. Choosing lexographically smaller nearest_neighbor") # print(" weighted partition size:", biggest_reachable_comp_weight, " total edit distance:", edit_distances_to_m[m]) pass if m < biggest_reachable_comp_nearest_neighbor: biggest_reachable_comp_nodes = set(reachable_comp) biggest_reachable_comp_nearest_neighbor = m else: pass else: print("BUG!") if biggest_reachable_comp_weight == 0: # if there were no edges! 
partition is nearest_neighbor itself M[m] = 0 partition[m] = set() else: nearest_neighbor = biggest_reachable_comp_nearest_neighbor # "XXXXXX" #biggest_reachable_comp_nearest_neighbor # max_direct_weight = 0 # print("total nodes searched in this pass:", len(biggest_reachable_comp_nodes)) for n in biggest_reachable_comp_nodes: direct_weight = G_transpose.node[n]["degree"] direct_weight += len(list(G_transpose.neighbors(n))) if direct_weight > max_direct_weight: max_direct_weight = direct_weight nearest_neighbor = n elif direct_weight == max_direct_weight: nearest_neighbor = min(nearest_neighbor, n) # print("nearest_neighbor direct weight:", max_direct_weight, "nodes in reachable:", len(biggest_reachable_comp_nodes)) M[nearest_neighbor] = biggest_reachable_comp_weight partition[nearest_neighbor] = biggest_reachable_comp_nodes.difference(set([nearest_neighbor])) assert nearest_neighbor in biggest_reachable_comp_nodes G_transpose.remove_nodes_from(biggest_reachable_comp_nodes) subgraph_set = subgraph_set - biggest_reachable_comp_nodes nr_consensus += 1 if params.verbose: print("NR CONSENSUS:", nr_consensus) print("NR nearest_neighbors:", len(M), len(partition)) print("partition sizes(identical strings counted once): ", sorted([len(partition[p]) +1 for p in partition], reverse = True)) total_strings_in_partition = sum([ len(partition[p]) +1 for p in partition]) partition_sequences = set() for m in partition: partition_sequences.add(m) # print("partition size:", len(partition[m])) # print(len(m)) for s in partition[m]: partition_sequences.add(s) assert unique_start_strings == partition_sequences assert total_strings_in_partition == len(unique_start_strings) return G_star, partition, M def get_all_NN_old(batch_of_queries, global_index_in_matrix, start_index, seq_to_acc_list_sorted, neighbor_search_depth, ignore_ends_threshold): all_neighbors_graph = {} lower_target_edit_distances = {} max_variants = 10 max_ed_allowed = max_variants + 2*ignore_ends_threshold # print("Processing global index:" , global_index_in_matrix) # error_types = {"D":0, "S": 0, "I": 0} for i in range(start_index, start_index + len(batch_of_queries)): if i % 500 == 0: print("processing ", i) seq1 = seq_to_acc_list_sorted[i][0] acc1 = seq_to_acc_list_sorted[i][1] all_neighbors_graph[acc1] = {} stop_up = False stop_down = False j = 1 while True: # for j in range(1,len(seq_to_acc_list_sorted)): if i - j < 0: stop_down = True if i + j >= len(seq_to_acc_list_sorted): stop_up = True if not stop_down: seq2 = seq_to_acc_list_sorted[i - j][0] acc2 = seq_to_acc_list_sorted[i - j][1] if math.fabs(len(seq1) - len(seq2)) > max_variants + 2*ignore_ends_threshold: stop_down = True if not stop_up: seq3 = seq_to_acc_list_sorted[i + j][0] acc3 = seq_to_acc_list_sorted[i + j][1] if math.fabs(len(seq1) - len(seq3)) > max_variants + 2*ignore_ends_threshold: stop_up = True if not stop_down: result = edlib.align(seq1, seq2, mode="NW", k=max_ed_allowed) # , task="path") ed = result["editDistance"] if 0 <= ed : #implies its smaller or equal to max_ed_allowed all_neighbors_graph[acc1][acc2] = (seq1, seq2, ed) if not stop_up: result = edlib.align(seq1, seq3, mode="NW", k=max_ed_allowed) # , task="path") ed = result["editDistance"] if 0 <= ed : #implies its smaller or equal to max_ed_allowed all_neighbors_graph[acc1][acc3] = (seq1, seq3, ed) if stop_down and stop_up: break if j >= neighbor_search_depth: break j += 1 return all_neighbors_graph def edlib_traceback(x, y, mode="HW", task="path", k=1, end_threshold = 0): result = edlib.align(x, y, mode=mode, 
task=task, k=k) ed = result["editDistance"] cigar = result["cigar"] if cigar: start, end = result["locations"][0] start_offset = start end_offset = len(y) - (end + 1) # print(start_offset, end_offset) ed += max(0, start_offset - end_threshold) ed += max(0, end_offset - end_threshold) tuples = [] result = re.split(r'[=DXSMI]+', cigar) i = 0 for length in result[:-1]: i += len(length) type_ = cigar[i] i += 1 tuples.append((length, type_ )) if tuples[-1][1] == "I": end_snippet_length = int(tuples[-1][0]) ed -= min(int(end_snippet_length), end_threshold) if tuples[0][1] == "I": begin_snippet_length = int(tuples[0][0]) ed -= min(int(begin_snippet_length), end_threshold) return ed def get_all_NN(batch_of_queries, global_index_in_matrix, start_index, seq_to_acc_list_sorted, neighbor_search_depth, ignore_ends_threshold): all_neighbors_graph = {} lower_target_edit_distances = {} max_variants = 10 max_ed_allowed = max_variants + ignore_ends_threshold # print("Processing global index:" , global_index_in_matrix) # error_types = {"D":0, "S": 0, "I": 0} for i in range(start_index, start_index + len(batch_of_queries)): if i % 500 == 0: print("processing ", i) seq1 = seq_to_acc_list_sorted[i][0] acc1 = seq_to_acc_list_sorted[i][1] all_neighbors_graph[acc1] = {} stop_up = False stop_down = False j = 1 while True: # for j in range(1,len(seq_to_acc_list_sorted)): if i - j < 0: stop_down = True if i + j >= len(seq_to_acc_list_sorted): stop_up = True if not stop_down: seq2 = seq_to_acc_list_sorted[i - j][0] acc2 = seq_to_acc_list_sorted[i - j][1] if math.fabs(len(seq1) - len(seq2)) > max_variants + 2*ignore_ends_threshold: stop_down = True if not stop_up: seq3 = seq_to_acc_list_sorted[i + j][0] acc3 = seq_to_acc_list_sorted[i + j][1] if math.fabs(len(seq1) - len(seq3)) > max_variants + 2*ignore_ends_threshold: stop_up = True if not stop_down: ed = edlib_traceback(seq1, seq2, mode="HW", k=max_ed_allowed, task="path", end_threshold = ignore_ends_threshold) # result = edlib.align(seq1, seq2, mode="HW", k=max_ed_allowed) # , task="path") # ed = result["editDistance"] if 0 <= ed <= max_variants: all_neighbors_graph[acc1][acc2] = ed #(seq1, seq2, ed) if not stop_up: ed = edlib_traceback(seq1, seq3, mode="HW", k=max_ed_allowed, task="path", end_threshold = ignore_ends_threshold) # result = edlib.align(seq1, seq3, mode="HW", k=max_ed_allowed) # , task="path") # ed = result["editDistance"] if 0 <= ed <= max_variants: all_neighbors_graph[acc1][acc3] = ed #(seq1, seq3, ed) if stop_down and stop_up: break if j >= neighbor_search_depth: break j += 1 return all_neighbors_graph def get_all_NN_helper(arguments): args, kwargs = arguments return get_all_NN(*args, **kwargs) def get_all_NN_under_ignored_edge_ends(seq_to_acc_list_sorted, params): if params.nr_cores == 1: all_neighbors_graph = get_all_NN(seq_to_acc_list_sorted, 0, 0, seq_to_acc_list_sorted, params.neighbor_search_depth, params.ignore_ends_len) # all_neighbors_graph_old = get_all_NN_old(seq_to_acc_list_sorted, 0, 0, seq_to_acc_list_sorted, params.neighbor_search_depth, params.ignore_ends_len) # print("TOTAL EDGES G_ALL OLD edlib TMP:", len([1 for s in all_neighbors_graph_old for t in all_neighbors_graph_old[s]])) # all_old = set([(s,t) for s in all_neighbors_graph_old for t in all_neighbors_graph_old[s]]) # print("TOTAL EDGES G_ALL edlib TMP:", len([1 for s in all_neighbors_graph for t in all_neighbors_graph[s]])) # all_ = set([(s,t) for s in all_neighbors_graph for t in all_neighbors_graph[s]] + [(t,s) for s in all_neighbors_graph for t in all_neighbors_graph[s]]) 
# for s,t in all_old - all_: # print("not in old:", s,t) # # print(edlib.align()) # res = edlib.align(all_neighbors_graph_old[s][t][0], all_neighbors_graph_old[s][t][1], mode="HW", task="path") # print(res["cigar"], res["editDistance"], res["locations"], len(all_neighbors_graph_old[s][t][1]) - res["locations"][0][1] - 1, res["locations"][0][0] ) # res = edlib.align(all_neighbors_graph_old[s][t][1], all_neighbors_graph_old[s][t][0], mode="HW", task="path") # print(res["cigar"], res["editDistance"] ) # res = edlib.align(all_neighbors_graph_old[s][t][0], all_neighbors_graph_old[s][t][1], mode="NW", task="path") # print(res["cigar"], res["editDistance"]) # print(all_neighbors_graph_old[s][t][2]) # (s1, s2, (s1_alignment, s2_alignment, (matches, mismatches, indels)) ) = parasail_alignment(all_neighbors_graph_old[s][t][0], all_neighbors_graph_old[s][t][1], 0, 0, opening_penalty = 3) # print(s1_alignment) # print(s2_alignment) # implement check here to se that all seqs got a nearest_neighbor, if not, print which noes that did not get a nearest_neighbor computed.! else: ####### parallelize alignment ######### # pool = Pool(processes=mp.cpu_count()) original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGINT, original_sigint_handler) pool = Pool(processes=mp.cpu_count()) # here we split the input into chunks chunk_size = max(int(len(seq_to_acc_list_sorted) / (10*mp.cpu_count())), 20 ) ref_seq_chunks = [ ( max(0, i - params.neighbor_search_depth -1), seq_to_acc_list_sorted[max(0, i - params.neighbor_search_depth -1) : i + chunk_size + params.neighbor_search_depth +1 ]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size) ] chunks = [(i, seq_to_acc_list_sorted[i:i + chunk_size]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size)] if params.verbose: write_output.logger(str([j for j, ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) write_output.logger("reference chunks:" + str([len(ch) for j,ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) # print([j for j, ch in ref_seq_chunks]) # print("reference chunks:", [len(ch) for j,ch in ref_seq_chunks]) write_output.logger(str([i for i,ch in chunks]), params.develop_logfile, timestamp=False) write_output.logger("query chunks:" + str([len(ch) for i,ch in chunks]), params.develop_logfile, timestamp=False) print([i for i,ch in chunks]) print("query chunks:", [len(ch) for i,ch in chunks]) # get nearest_neighbors takes thre sub containers: # chunk - a container with (sequences, accesions)-tuples to be aligned (queries) # ref_seq_chunks - a container with (sequences, accesions)-tuples to be aligned to (references) # already_converged_chunks - a set of query sequences that has already converged try: res = pool.map_async(get_all_NN_helper, [ ((chunks[i][1], chunks[i][0], chunks[i][0] - ref_seq_chunks[i][0], ref_seq_chunks[i][1], params.neighbor_search_depth, params.ignore_ends_len), {}) for i in range(len(chunks))] ) best_edit_distances_results =res.get(999999999) # Without the timeout this blocking call ignores all signals. 
except KeyboardInterrupt: print("Caught KeyboardInterrupt, terminating workers") pool.terminate() sys.exit() else: # print("Normal termination") pool.close() pool.join() all_neighbors_graph = {} for sub_graph in best_edit_distances_results: for seq in sub_graph: assert seq not in all_neighbors_graph all_neighbors_graph.update(sub_graph) return all_neighbors_graph def get_NN_graph_ignored_ends_edlib(candidate_transcripts, args): seq_to_acc = {seq: acc for (acc, seq) in candidate_transcripts.items()} seq_to_acc_list = list(seq_to_acc.items()) seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0])) all_neighbors_graph = get_all_NN_under_ignored_edge_ends(seq_to_acc_list_sorted, args) # edges needs to be symmetric for c1 in all_neighbors_graph: for c2 in all_neighbors_graph[c1]: ed = all_neighbors_graph[c1][c2] if c1 not in all_neighbors_graph[c2]: all_neighbors_graph[c2][c1] = ed else: min_ed = min(all_neighbors_graph[c1][c2], all_neighbors_graph[c2][c1]) all_neighbors_graph[c2][c1] = min_ed # if all_neighbors_graph[c1][c2][2] < all_neighbors_graph[c2][c1][2]: # all_neighbors_graph[c2][c1] = (all_neighbors_graph[c2][c1][0],all_neighbors_graph[c2][c1][1],all_neighbors_graph[c1][c2][2]) # if all_neighbors_graph[c2][c1] != all_neighbors_graph[c1][c2]: # print(all_neighbors_graph[c2][c1][2], all_neighbors_graph[c1][c2][2]) # res = edlib.align(all_neighbors_graph[c1][c2][0], all_neighbors_graph[c1][c2][1], mode="HW", task="path") # print(res["cigar"], res["editDistance"], res["locations"], len(all_neighbors_graph[c1][c2][1]) - res["locations"][0][1] - 1, res["locations"][0][0] ) # res = edlib.align(all_neighbors_graph[c1][c2][1], all_neighbors_graph[c1][c2][0], mode="HW", task="path") # print(res["cigar"], res["editDistance"], res["locations"], len(all_neighbors_graph[c1][c2][0]) - res["locations"][0][1] - 1, res["locations"][0][0] ) # (s1, s2, (s1_alignment, s2_alignment, (matches, mismatches, indels)) ) = parasail_alignment(all_neighbors_graph[c1][c2][0], all_neighbors_graph[c1][c2][1], 0, 0, opening_penalty = 3, gap_ext = 1) # print(s1_alignment) # print(s2_alignment) assert len(candidate_transcripts) == len(all_neighbors_graph) return all_neighbors_graph def get_nearest_neighbors_graph_under_ignored_ends(candidate_transcripts, args): seq_to_acc = {seq: acc for (acc, seq) in candidate_transcripts.items()} seq_to_acc_list = list(seq_to_acc.items()) seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0])) nearest_neighbor_graph = get_nearest_neighbors_under_ignored_edge_ends(seq_to_acc_list_sorted, args) nearest_neighbor_graph_parasail = get_nearest_neighbors_under_ignored_edge_ends_parasail(seq_to_acc_list_sorted, args) print("edges before:", len([1 for s in nearest_neighbor_graph for r in nearest_neighbor_graph[s] ])) print("edges parasail:", len([1 for s in nearest_neighbor_graph_parasail for r in nearest_neighbor_graph_parasail[s] ])) print("EDs before:", sum([nearest_neighbor_graph[s][r] for s in nearest_neighbor_graph for r in nearest_neighbor_graph[s] ])) print("EDs parasail:", sum([nearest_neighbor_graph_parasail[s][r] for s in nearest_neighbor_graph_parasail for r in nearest_neighbor_graph_parasail[s] ])) for acc1 in list(nearest_neighbor_graph_parasail.keys()): for acc2 in list(nearest_neighbor_graph_parasail[acc1].keys()): ed = nearest_neighbor_graph_parasail[acc1][acc2] if ed > 10: del nearest_neighbor_graph_parasail[acc1][acc2] if args.verbose: print("had ed > 10 statistical test", acc1, acc2) for acc1 in nearest_neighbor_graph_parasail: if 
args.verbose: for acc2 in nearest_neighbor_graph_parasail[acc1]: if nearest_neighbor_graph_parasail[acc1][acc2] > 0: print("To be tested:", acc1, acc2, nearest_neighbor_graph_parasail[acc1][acc2]) assert len(candidate_transcripts) == len(nearest_neighbor_graph_parasail) return nearest_neighbor_graph_parasail def get_invariants_under_ignored_edge_ends(seq_to_acc_list_sorted, params): if params.nr_cores == 1: best_edit_distances = get_nearest_neighbors(seq_to_acc_list_sorted, 0, 0, seq_to_acc_list_sorted, params.neighbor_search_depth, params.ignore_ends_len) # implement check here to se that all seqs got a nearest_neighbor, if not, print which noes that did not get a nearest_neighbor computed.! else: ####### parallelize alignment ######### # pool = Pool(processes=mp.cpu_count()) original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGINT, original_sigint_handler) pool = Pool(processes=mp.cpu_count()) # here we split the input into chunks chunk_size = max(int(len(seq_to_acc_list_sorted) / (10*mp.cpu_count())), 20 ) ref_seq_chunks = [ ( max(0, i - params.neighbor_search_depth -1), seq_to_acc_list_sorted[max(0, i - params.neighbor_search_depth -1) : i + chunk_size + params.neighbor_search_depth +1 ]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size) ] chunks = [(i, seq_to_acc_list_sorted[i:i + chunk_size]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size)] if params.verbose: write_output.logger(str([j for j, ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) write_output.logger("reference chunks:" + str([len(ch) for j,ch in ref_seq_chunks]), params.develop_logfile, timestamp=False) # print([j for j, ch in ref_seq_chunks]) # print("reference chunks:", [len(ch) for j,ch in ref_seq_chunks]) write_output.logger(str([i for i,ch in chunks]), params.develop_logfile, timestamp=False) write_output.logger("query chunks:" + str([len(ch) for i,ch in chunks]), params.develop_logfile, timestamp=False) print([i for i,ch in chunks]) print("query chunks:", [len(ch) for i,ch in chunks]) # get nearest_neighbors takes thre sub containers: # chunk - a container with (sequences, accesions)-tuples to be aligned (queries) # ref_seq_chunks - a container with (sequences, accesions)-tuples to be aligned to (references) # already_converged_chunks - a set of query sequences that has already converged try: res = pool.map_async(get_nearest_neighbors_helper, [ ((chunks[i][1], chunks[i][0], chunks[i][0] - ref_seq_chunks[i][0], ref_seq_chunks[i][1], params.neighbor_search_depth, params.ignore_ends_len), {}) for i in range(len(chunks))] ) best_edit_distances_results =res.get(999999999) # Without the timeout this blocking call ignores all signals. except KeyboardInterrupt: print("Caught KeyboardInterrupt, terminating workers") pool.terminate() sys.exit() else: # print("Normal termination") pool.close() pool.join() best_edit_distances = {} for sub_graph in best_edit_distances_results: for seq in sub_graph: assert seq not in best_edit_distances best_edit_distances.update(sub_graph) # store only invariants here, i.e., edit distance 0 when ignoring ends! for acc1 in list(best_edit_distances.keys()): for acc2 in list(best_edit_distances[acc1].keys()): if best_edit_distances[acc1][acc2] != 0: del best_edit_distances[acc1][acc2] return best_edit_distances def is_overlap(text1, text2, ignore_ends_threshold): # Cache the text lengths to prevent multiple calls. text1_length = len(text1) text2_length = len(text2) # Eliminate the null case. 
if text1_length == 0 or text2_length == 0: return 0 # Truncate the longer string. if text1_length > text2_length: text1 = text1[-text2_length:] elif text1_length < text2_length: text2 = text2[:text1_length] # Quick check for the worst case. if text1 == text2: return min(text1_length, text2_length) # Start by looking for a single character match # and increase length until no match is found. best = 0 length = 1 while True: pattern = text1[-length:] found = text2.find(pattern) if found == -1: prefix_offset = text1_length - best suffix_offset = text2_length - best if prefix_offset <= ignore_ends_threshold and suffix_offset <= ignore_ends_threshold: return True else: return False length += found if text1[-length:] == text2[:length]: best = length length += 1 def get_invariants_under_ignored_edge_ends_speed(candidate_transcripts, candidate_support, params): edge_invariant_threshold = params.ignore_ends_len G = nx.DiGraph() for acc in candidate_transcripts: deg = candidate_support[acc] G.add_node(acc, degree = deg) sorted_lenghts = sorted(candidate_transcripts.items(), key = lambda x: len(x[1])) for i, (acc1, seq1) in enumerate(sorted_lenghts): if i % 1000 == 0: print(i, "candidates processed") for (acc2, seq2) in sorted_lenghts: if acc2 == acc1: continue if len(seq2) < len(seq1) - 2*edge_invariant_threshold: continue elif len(seq1) - 2*edge_invariant_threshold <= len(seq2) <= len(seq1): # is long enough to be merged if seq2 in seq1: # is strict substring start_offset = seq1.find(seq2) end_offset = len(seq1) - (start_offset + len(seq2)) if start_offset <= edge_invariant_threshold and end_offset <= edge_invariant_threshold: G.add_edge(acc2, acc1) # directed edge seq2 --> seq1 G.add_edge(acc1, acc2) # directed edge seq2 --> seq1 else: # check if perfect overlap within ends threshold bool_overlap1 = is_overlap(seq1, seq2, edge_invariant_threshold) bool_overlap2 = is_overlap(seq2, seq1, edge_invariant_threshold) if bool_overlap1 or bool_overlap2: G.add_edge(acc2, acc1) G.add_edge(acc1, acc2) else: break return G # def get_invariants_under_ignored_edge_ends_OLD(candidate_transcripts, candidate_support, params): # seq_to_acc = {seq: acc for (acc, seq) in candidate_transcripts.items()} # seq_to_acc_list = list(seq_to_acc.items()) # seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0])) # invariant_graph = get_invariants_under_ignored_edge_ends(seq_to_acc_list_sorted, params) # G = nx.DiGraph() # for acc in candidate_transcripts: # deg = candidate_support[acc] # G.add_node(acc, degree = deg) # # add edges # for acc1 in invariant_graph: # for acc2 in invariant_graph[acc1]: # G.add_edge(acc1, acc2) # return G def collapse_candidates_under_ends_invariant(candidate_transcripts, candidate_support, params): print("Final candidates before edge invariants:", len(candidate_transcripts)) # start = time() # G_old = get_invariants_under_ignored_edge_ends_OLD(candidate_transcripts, candidate_support, params) # elapsed = time() - start # print("INVARIANTS OLD:", elapsed) start = time() G = get_invariants_under_ignored_edge_ends_speed(candidate_transcripts, candidate_support, params) elapsed = time() - start print("INVARIANTS NEW:", elapsed) # print(len(list(G_old.nodes())), len(list(G.nodes())) ) # print(len(list(G_old.edges())), len(list(G.edges())) ) # for edge in list(G.edges()): # if not G_old.has_edge(*edge): # print(edge[0]) # print(candidate_transcripts[edge[0]]) # print(edge[1]) # print(candidate_transcripts[edge[1]]) # print() # print(nx.is_isomorphic(G_old, G)) # seq_to_acc = {seq: acc 
for (acc, seq) in candidate_transcripts.items()} # seq_to_acc_list = list(seq_to_acc.items()) # seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0])) # invariant_graph = get_invariants_under_ignored_edge_ends(seq_to_acc_list_sorted, params) # G = nx.DiGraph() # for acc in candidate_transcripts: # deg = candidate_support[acc] # G.add_node(acc, degree = deg) # # add edges # for acc1 in invariant_graph: # for acc2 in invariant_graph[acc1]: # G.add_edge(acc1, acc2) G_tmp = copy.deepcopy(G) G_star, partition, M = partition_highest_reachable_with_edge_degrees(G_tmp, params) # _, partition_old, _ = partition_highest_reachable_with_edge_degrees(G_old, params) # print("Final candidates old:", len(partition_old)) print("Final candidates:", len(partition)) # sys.exit() # # SAM_file = minimap2_alignment_module.align(targets, queries, nr_cores) # sorted_lenghts = sorted(candidate_transcripts.items(), key = lambda x: len(x[1])) # for i, (acc1, seq1) in enumerate(sorted_lenghts): # if i % 1000 == 0: # print(i, "candidates processed") # for (acc2, seq2) in sorted_lenghts: # if acc2 == acc1: # continue # if len(seq2) > len(seq1): # break # if len(seq2) >= len(seq1) - 2*params.ignore_ends_len: # is long enough to be merged # if seq2 in seq1: # # has to be within ends varinat lenghth: # start_offset = seq1.find(seq2) # end_offset = len(seq1) - (start_offset + len(seq2)) # if start_offset <= params.ignore_ends_len and end_offset <= params.ignore_ends_len: # #Make sure this doesnet crach out statistical test if two candidates differ only in ends!!! # G.add_edge(acc2, acc1) # directed edge seq2 --> seq1 # # sort order: largest number of neigbors, then largest degree. If still tie, sort by smallest string # G_transpose = nx.reverse(G) # largest_nr_neighbors = sorted([ (len(list(G_transpose.neighbors(n))), G_transpose.node[n]["degree"], n) for n in sorted(G_transpose.nodes())], key= lambda x: (-x[0], -x[1], x[2]) ) # marked = set() # partition = {} # for nr_nbrs, deg, c in largest_nr_neighbors: # if c not in marked: # nbrs = [n for n in G_transpose.neighbors(c) if n not in marked] # partition[c] = set(nbrs) # marked.add(c) # marked.update(nbrs) if params.verbose: for t in partition: print(t, partition[t]) print("Final candidates after edge invariants:", len(partition)) print() return partition def main(args): candidate_transcripts = {acc: seq for (acc, seq) in read_fasta(open(args.candidate_transcripts, 'r'))} candidate_support = {} for (acc, seq) in read_fasta(open(args.candidate_transcripts, 'r')): supp = acc.split("_support_")[1] candidate_support[acc] = int(supp) # print("Number of consensus:", len(candidate_transcripts)) seq_to_acc = {seq: acc for (acc, seq) in read_fasta(open(args.candidate_transcripts, 'r'))} seq_to_acc_list = list(seq_to_acc.items()) seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0])) collapsed_candidate_transcripts = { acc : seq for (seq, acc) in seq_to_acc.items() } # print("Number of collapsed consensus:", len(collapsed_candidate_transcripts)) assert len(collapsed_candidate_transcripts) == len(candidate_transcripts) # all transcripts should be unique at this point nearest_neighbor_graph = get_invariants_under_ignored_edge_ends(seq_to_acc_list_sorted, args) outfile = open(args.outfile, "w") edges = 0 tot_ed = 0 for acc1 in nearest_neighbor_graph: seq1 = candidate_transcripts[acc1] for acc2 in nearest_neighbor_graph[acc1]: seq2 = candidate_transcripts[acc2] edges += 1 tot_ed += nearest_neighbor_graph[acc1][acc2] 
            outfile.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(acc1, candidate_support[acc1], acc2, candidate_support[acc2], nearest_neighbor_graph[acc1][acc2]))

    print("Number of edges:", edges)
    print("Total edit distance:", tot_ed)
    if float(edges) > 0:
        print("Avg ed (ed/edges):", tot_ed/ float(edges))

    # convert nearest_neighbor graph to nx graph object
    G = nx.DiGraph()
    # add nodes
    for acc in candidate_transcripts:
        deg = candidate_support[acc]
        G.add_node(acc, degree = deg)
    # add edges
    for acc1 in nearest_neighbor_graph:
        for acc2 in nearest_neighbor_graph[acc1]:
            G.add_edge(acc1, acc2)

    G_star, partition, M = partition_highest_reachable_with_edge_degrees(G, args)
    print("candidates after edge invariants:", len(partition))
    # for t in partition:
    #     print(t)
    #     print(partition[t])
    #     print()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Print nearest_neighbor graph allowing for mismatches in ends.")
    parser.add_argument('candidate_transcripts', type=str, help='Path to the consensus fasta file')
    parser.add_argument('outfile', type=str, help='Outfile of results')
    parser.add_argument('--ignore_ends_len', type=int, default=15, help='Number of bp to ignore in ends. If two candidates are identical except in ends of this size, they are collapsed and the longest common substring is chosen to represent them. In the statistical test step, nearest_neighbors are found based on ignoring the ends of this size. Also indels in ends will not be tested. [default ignore_ends_len=15].')
    parser.add_argument('--single_core', dest='single_core', action='store_true', help='Force working on single core.')
    parser.add_argument('--neighbor_search_depth', type=int, default=2**32, help='Maximum number of pairwise alignments in search matrix to find nearest_neighbor. [default =2**32]')
    args = parser.parse_args()
    main(args)
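
# Illustrative usage sketch (not part of the original module). It exercises the
# is_overlap() helper defined above on two made-up sequences that are identical
# except for short overhanging ends; the sequences and thresholds are invented
# for demonstration only.
def _demo_is_overlap():
    seq1 = "AAACGTACGTACGTACGT"   # carries a 3 bp extra prefix
    seq2 = "CGTACGTACGTACGTTT"    # carries a 2 bp extra suffix
    # With a 3 bp end tolerance the two sequences count as end-invariant ...
    print(is_overlap(seq1, seq2, ignore_ends_threshold=3))  # True
    # ... but a stricter 2 bp tolerance rejects the 3 bp overhanging prefix.
    print(is_overlap(seq1, seq2, ignore_ends_threshold=2))  # False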
PypiClean
/GP_Framework_BYU_HCMI-0.0.10.tar.gz/GP_Framework_BYU_HCMI-0.0.10/gp_framework/report.py
import plotly.graph_objects as go from plotly.subplots import make_subplots import csv from typing import Dict from gp_framework.fitness_calculator import * from gp_framework.population_manager import LifecycleReport from gp_framework import config _END_OF_METADATA = ["End of metadata"] def generate_many_reports(header: List[str], name_to_reports: Dict[str, List[LifecycleReport]], name_to_metadata: Dict[str, List[List[any]]], elements_per_point): for item in name_to_reports.items(): # the [] in name_to_metadata.get is the value returned if item[0] is not a valid key generate_csv(item[0] + '.csv', header, [r.to_list() for r in item[1]], name_to_metadata.get(item[0], [])) generate_plot_from_csv(item[0] + '.csv', elements_per_point, item[0]) def generate_csv(csv_name: str, header: List[any], rows: List[List[any]], metadata: List[List[any]]): with open("csvs/" + csv_name, 'w') as csv_file: csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_NONNUMERIC) csv_writer.writerows(metadata) csv_writer.writerow(_END_OF_METADATA) csv_writer.writerow(header) csv_writer.writerows(rows) def _average(floats: List[float]) -> float: total = 0 for i in floats: total += i return total/len(floats) def _combine_list_elements(list_: List[float], group_size: int) -> List[float]: combined_list = [] starting_indices = [i for i in range(0, len(list_), group_size)] end_index = len(list_) + 1 for index in starting_indices: combined_list.append(_average(list_[index:min(index+group_size, end_index)])) return combined_list def _transpose_list_of_lists(list_of_lists: List[List[any]]) -> List[List[any]]: """ This assumes that all inner_lists have the same length :param list_of_lists: the list to transpose :return: the transposed list """ new_list = [] for i in range(len(list_of_lists[0])): new_list.append([]) for j in range(len(list_of_lists)): new_list[i].append(list_of_lists[j][i]) return new_list def generate_plot_from_csv(csv_name: str, elements_per_point: int, output_name: str) -> None: """ Makes nice plots to help visualize data :param csv_name: Name of csv file to draw data from :param elements_per_point: How many data points to average into one point on the plot :return: """ labels: List[str] data: List[List[float]] = [] # read the csv file into a list of lists with open("csvs/{}".format(csv_name), 'r') as file: reader = csv.reader(file, quoting=csv.QUOTE_NONNUMERIC) # scan until the end of the metadata is reached for row in reader: if row == _END_OF_METADATA: break labels = next(reader) for row in reader: data.append(row) data = _transpose_list_of_lists(data) # combine the elements of data for i in range(len(data)): data[i] = _combine_list_elements(data[i], elements_per_point) # fig = make_subplots(rows=len(data), cols=1, subplot_titles=labels) fig = go.Figure() for i in range(len(data)): fig.add_trace(go.Scatter(x=[j for j in range(len(data[i]))], y=data[i], name=labels[i])) fig.update_layout(height=1000, width=1000*len(data), title_text=output_name) if config.CONFIG.save_plots: fig.write_html("plots/{}.html".format(output_name)) if config.CONFIG.show_plots: fig.show()
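
# Illustrative usage sketch (not part of the original module). It assumes the
# working directory already contains the "csvs" and "plots" folders used by the
# functions above, and that gp_framework's CONFIG object has been initialised;
# the header names and numbers are invented for demonstration.
def _demo_report():
    header = ["max fitness", "mean fitness"]
    rows = [[0.10, 0.05], [0.20, 0.12], [0.35, 0.20], [0.50, 0.31]]
    metadata = [["run label", "demo"]]
    generate_csv("demo.csv", header, rows, metadata)
    # Average every two rows into a single plotted point.
    generate_plot_from_csv("demo.csv", elements_per_point=2, output_name="demo")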
PypiClean
/Ardy-0.0.6.tar.gz/Ardy-0.0.6/ardy/core/cmd/main.py
from __future__ import unicode_literals, print_function import argparse import sys import traceback from ardy.config import GlobalConfig from ardy.core.build import Build from ardy.core.deploy import Deploy from ardy.utils.log import logger class Command(object): config = None parser = None args = [] def __init__(self, *args, **kwargs): arguments = kwargs.get("arguments", False) self.exit_at_finish = kwargs.get("exit_at_finish", True) if not arguments: arguments = sys.argv[1:] self.parser = self.init_config(arguments) commands = self.parser.add_subparsers(title="Commands", description='Available commands', dest='command_name') # Add deploy commands parser_deploy = commands.add_parser('deploy', help='Upload functions to AWS Lambda') parser_deploy.add_argument("lambdafunctions", default="_ALL_", nargs='*', type=str, help='Lambda(s) to deploy') parser_deploy.add_argument("-z", "--zipfile", help="Path and filename of artefact to deploy") environments = self.config["deploy"].get("deploy_environments", []) if environments: parser_deploy.add_argument("environment", choices=environments, type=str, help='Environment where deploy: {}'.format(environments)) # Add invoke commands parser_invoke = commands.add_parser('invoke', help='Invoke a functions from AWS Lambda') parser_invoke.add_argument("-l", "--lambda-function", help="lambda") # Add build commands parser_build = commands.add_parser('build', help='Create an artefact and Upload to S3 if S3 is configured (See config)') parser_build.add_argument("-r", "--requirements", help="Path and filename of the python project") self.args = self.parser.parse_args(arguments) try: result = self.parse_commandline() if result: self.exit_ok("OK") except Exception as e: # traceback = sys.exc_info()[2] logger.error(traceback.format_exc()) self.exit_with_error("ERROR") @property def parser_base(self): parser = argparse.ArgumentParser(description='Ardy. AWS Lambda Toolkit') parser.add_argument("-f", "--conffile", help="Name to the project config file") parser.add_argument("-p", "--project", help="Project path") return parser def init_config(self, arguments): # TODO: refactor this method... 
sooo ugly :S
        parser = self.parser_base
        parser.add_argument('args', nargs=argparse.REMAINDER)
        base_parser = parser.parse_args(arguments)

        params = {}
        if getattr(base_parser, "project", False) and base_parser.project is not None:
            params["path"] = base_parser.project

        if getattr(base_parser, "conffile", False) and base_parser.conffile is not None:
            params["filename"] = base_parser.conffile

        self.config = GlobalConfig(**params)
        return self.parser_base

    def parse_commandline(self):
        params = {}
        run_params = {}
        result = False
        if self.args.command_name == "deploy":
            if self.args.lambdafunctions and self.args.lambdafunctions != "_ALL_":
                params["lambdas_to_deploy"] = self.args.lambdafunctions
            if getattr(self.args, "environment", False):
                params["environment"] = self.args.environment
            if getattr(self.args, "zipfile", False):
                run_params["path_to_zip_file"] = self.args.zipfile
            deploy = Deploy(config=self.config, **params)
            result = deploy.run(**run_params)
        elif self.args.command_name == "invoke":
            pass
        elif self.args.command_name == "build":
            if getattr(self.args, "requirements", False):
                run_params["requirements"] = self.args.requirements
            build = Build(config=self.config)
            result = build.run(**run_params)
        else:
            self.parser.print_help()
        return result

    def exit_with_error(self, msg=""):
        self.print_error(msg)
        if self.exit_at_finish:
            sys.exit(2)

    def exit_ok(self, msg=""):
        self.print_ok(msg)
        if self.exit_at_finish:
            sys.exit(0)

    @staticmethod
    def print_ok(msg=""):
        print('\033[92m\033[1m ' + msg + ' \033[0m\033[0m')

    @staticmethod
    def print_error(msg=""):
        print('\033[91m\033[1m ' + msg + ' \033[0m\033[0m')


if __name__ == '__main__':
    cmd = Command(arguments=sys.argv[1:])
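
# Illustrative usage sketch (not part of the original module). The project path
# and config file name below are placeholders; a real Ardy project config is
# required for GlobalConfig to load successfully.
def _demo_build_command():
    # Equivalent to running: ardy -p /path/to/project -f config.json build
    Command(
        arguments=["-p", "/path/to/project", "-f", "config.json", "build"],
        exit_at_finish=False,
    )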
PypiClean
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/db/backends/sqlite3/introspection.py
import re from django.db.backends import BaseDatabaseIntrospection field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$') def get_field_size(name): """ Extract the size number from a "varchar(11)" type name """ m = field_size_re.search(name) return int(m.group(1)) if m else None # This light wrapper "fakes" a dictionary interface, because some SQLite data # types include variables in them -- e.g. "varchar(30)" -- and can't be matched # as a simple dictionary lookup. class FlexibleFieldLookupDict(object): # Maps SQL types to Django Field types. Some of the SQL types have multiple # entries here because SQLite allows for anything and doesn't normalize the # field type; it uses whatever was given. base_data_types_reverse = { 'bool': 'BooleanField', 'boolean': 'BooleanField', 'smallint': 'SmallIntegerField', 'smallint unsigned': 'PositiveSmallIntegerField', 'smallinteger': 'SmallIntegerField', 'int': 'IntegerField', 'integer': 'IntegerField', 'bigint': 'BigIntegerField', 'integer unsigned': 'PositiveIntegerField', 'decimal': 'DecimalField', 'real': 'FloatField', 'text': 'TextField', 'char': 'CharField', 'date': 'DateField', 'datetime': 'DateTimeField', 'time': 'TimeField', } def __getitem__(self, key): key = key.lower() try: return self.base_data_types_reverse[key] except KeyError: size = get_field_size(key) if size is not None: return ('CharField', {'max_length': size}) raise KeyError class DatabaseIntrospection(BaseDatabaseIntrospection): data_types_reverse = FlexibleFieldLookupDict() def get_table_list(self, cursor): "Returns a list of table names in the current database." # Skip the sqlite_sequence system table used for autoincrement key # generation. cursor.execute(""" SELECT name FROM sqlite_master WHERE type='table' AND NOT name='sqlite_sequence' ORDER BY name""") return [row[0] for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): "Returns a description of the table, with the DB-API cursor.description interface." return [(info['name'], info['type'], None, info['size'], None, None, info['null_ok']) for info in self._table_info(cursor, table_name)] def get_relations(self, cursor, table_name): """ Returns a dictionary of {field_index: (field_index_other_table, other_table)} representing all relationships to the given table. Indexes are 0-based. """ # Dictionary of relations to return relations = {} # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. 
for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue table, column = [s.strip('"') for s in m.groups()] cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table]) result = cursor.fetchall()[0] other_table_results = result[0].strip() li, ri = other_table_results.index('('), other_table_results.rindex(')') other_table_results = other_table_results[li+1:ri] for other_index, other_desc in enumerate(other_table_results.split(',')): other_desc = other_desc.strip() if other_desc.startswith('UNIQUE'): continue name = other_desc.split(' ', 1)[0].strip('"') if name == column: relations[field_index] = (other_index, table) break return relations def get_key_columns(self, cursor, table_name): """ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all key columns in given table. """ key_columns = [] # Schema for this table cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] # Walk through and look for references to other tables. SQLite doesn't # really have enforced references, but since it echoes out the SQL used # to create the table we can look for REFERENCES statements used there. for field_index, field_desc in enumerate(results.split(',')): field_desc = field_desc.strip() if field_desc.startswith("UNIQUE"): continue m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I) if not m: continue # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns key_columns.append(tuple([s.strip('"') for s in m.groups()])) return key_columns def get_indexes(self, cursor, table_name): indexes = {} for info in self._table_info(cursor, table_name): if info['pk'] != 0: indexes[info['name']] = {'primary_key': True, 'unique': False} cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name)) # seq, name, unique for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]: cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index)) info = cursor.fetchall() # Skip indexes across multiple fields if len(info) != 1: continue name = info[0][2] # seqno, cid, name indexes[name] = {'primary_key': False, 'unique': unique} return indexes def get_primary_key_column(self, cursor, table_name): """ Get the column name of the primary key for the given table. """ # Don't use PRAGMA because that causes issues with some transactions cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"]) results = cursor.fetchone()[0].strip() results = results[results.index('(')+1:results.rindex(')')] for field_desc in results.split(','): field_desc = field_desc.strip() m = re.search('"(.*)".*PRIMARY KEY$', field_desc) if m: return m.groups()[0] return None def _table_info(self, cursor, name): cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name)) # cid, name, type, notnull, dflt_value, pk return [{'name': field[1], 'type': field[2], 'size': get_field_size(field[2]), 'null_ok': not field[3], 'pk': field[5] # undocumented } for field in cursor.fetchall()]
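
# Illustrative usage sketch (not part of the original module). It assumes a
# configured Django project using this SQLite backend, where the class above is
# exposed as connection.introspection; the "default" alias is an assumption
# about the project's DATABASES setting.
def _demo_list_tables():
    from django.db import connections
    connection = connections["default"]
    cursor = connection.cursor()
    return connection.introspection.get_table_list(cursor)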
PypiClean
/Indomielibs-2.0.106.tar.gz/Indomielibs-2.0.106/pyrogram/handlers/raw_update_handler.py
from typing import Callable from .handler import Handler class RawUpdateHandler(Handler): """The Raw Update handler class. Used to handle raw updates. It is intended to be used with :meth:`~pyrogram.Client.add_handler` For a nicer way to register this handler, have a look at the :meth:`~pyrogram.Client.on_raw_update` decorator. Parameters: callback (``Callable``): A function that will be called when a new update is received from the server. It takes *(client, update, users, chats)* as positional arguments (look at the section below for a detailed description). Other Parameters: client (:obj:`~pyrogram.Client`): The Client itself, useful when you want to call other API methods inside the update handler. update (``Update``): The received update, which can be one of the many single Updates listed in the :obj:`~pyrogram.raw.base.Update` base type. users (``dict``): Dictionary of all :obj:`~pyrogram.types.User` mentioned in the update. You can access extra info about the user (such as *first_name*, *last_name*, etc...) by using the IDs you find in the *update* argument (e.g.: *users[1768841572]*). chats (``dict``): Dictionary of all :obj:`~pyrogram.types.Chat` and :obj:`~pyrogram.raw.types.Channel` mentioned in the update. You can access extra info about the chat (such as *title*, *participants_count*, etc...) by using the IDs you find in the *update* argument (e.g.: *chats[1701277281]*). Note: The following Empty or Forbidden types may exist inside the *users* and *chats* dictionaries. They mean you have been blocked by the user or banned from the group/channel. - :obj:`~pyrogram.raw.types.UserEmpty` - :obj:`~pyrogram.raw.types.ChatEmpty` - :obj:`~pyrogram.raw.types.ChatForbidden` - :obj:`~pyrogram.raw.types.ChannelForbidden` """ def __init__(self, callback: Callable): super().__init__(callback)
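
# Illustrative usage sketch (not part of the original module). It follows the
# registration pattern described in the docstring above; the client instance is
# supplied by the caller and the callback body is a placeholder.
def _demo_register_raw_handler(app):
    async def raw_update_callback(client, update, users, chats):
        print(type(update).__name__, len(users), len(chats))

    app.add_handler(RawUpdateHandler(raw_update_callback))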
PypiClean
/101903683_kunal_topsis-v1.2.tar.gz/101903683_kunal_topsis-v1.2/101903683_kunal_topsis/topsis.py
import sys import pandas as pd import numpy as np def normalized_matrix(filename): '''To normalize each of the values in the csv file''' try: dataset = pd.read_csv(filename) #loading the csv file into dataset if len(dataset.axes[1])<3: print("Number of columns should be greater than 3") sys.exit(1) attributes = dataset.iloc[:,1:].values '''the attributes and alternatives are 2-D numpy arrays''' sum_cols=[0]*len(attributes[0]) #1-D array with size equal to the nummber of columns in the attributes array for i in range(len(attributes)): for j in range(len(attributes[i])): sum_cols[j]+=np.square(attributes[i][j]) for i in range(len(sum_cols)): sum_cols[i]=np.sqrt(sum_cols[i]) for i in range(len(attributes)): for j in range(len(attributes[i])): attributes[i][j]=attributes[i][j]/sum_cols[j] return (attributes) except Exception as e: print(e) def weighted_matrix(attributes,weights): ''' To multiply each of the values in the attributes array with the corresponding weights of the particular attribute''' try: weights=weights.split(',') for i in range(len(weights)): weights[i]=float(weights[i]) weighted_attributes=[] for i in range(len(attributes)): temp=[] for j in range(len(attributes[i])): temp.append(attributes[i][j]*weights[j]) weighted_attributes.append(temp) return(weighted_attributes) except Exception as e: print(e) def impact_matrix(weighted_attributes,impacts): try: impacts=impacts.split(',') Vjpositive=[] Vjnegative=[] for i in range(len(weighted_attributes[0])): Vjpositive.append(weighted_attributes[0][i]) Vjnegative.append(weighted_attributes[0][i]) for i in range(1,len(weighted_attributes)): for j in range(len(weighted_attributes[i])): if impacts[j]=='+': if weighted_attributes[i][j]>Vjpositive[j]: Vjpositive[j]=weighted_attributes[i][j] elif weighted_attributes[i][j]<Vjnegative[j]: Vjnegative[j]=weighted_attributes[i][j] elif impacts[j]=='-': if weighted_attributes[i][j]<Vjpositive[j]: Vjpositive[j]=weighted_attributes[i][j] elif weighted_attributes[i][j]>Vjnegative[j]: Vjnegative[j]=weighted_attributes[i][j] Sjpositive=[0]*len(weighted_attributes) Sjnegative=[0]*len(weighted_attributes) for i in range(len(weighted_attributes)): for j in range(len(weighted_attributes[i])): Sjpositive[i]+=np.square(weighted_attributes[i][j]-Vjpositive[j]) Sjnegative[i]+=np.square(weighted_attributes[i][j]-Vjnegative[j]) for i in range(len(Sjpositive)): Sjpositive[i]=np.sqrt(Sjpositive[i]) Sjnegative[i]=np.sqrt(Sjnegative[i]) Performance_score=[0]*len(weighted_attributes) for i in range(len(weighted_attributes)): Performance_score[i]=Sjnegative[i]/(Sjnegative[i]+Sjpositive[i]) return(Performance_score) except Exception as e: print(e) def rank(filename,weights,impacts,resultfilename): try: a = normalized_matrix(filename) c = weighted_matrix(a,weights) d = impact_matrix(c,impacts) dataset = pd.read_csv(filename) dataset['topsis score']="" dataset['topsis score']=d copi=d.copy() copi.sort(reverse=True) Rank=[] for i in range(0,len(d)): temp=d[i] for j in range(0,len(copi)): if temp==copi[j]: Rank.append(j+1) break dataset['Rank']="" dataset['Rank']=Rank dataset.to_csv(resultfilename,index=False) except Exception as e: print(e)
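
# Illustrative usage sketch (not part of the original module). The file names,
# weights and impacts below are placeholders; the input CSV is expected to hold
# an identifier column followed by numeric criteria columns, as assumed by
# normalized_matrix() above.
def _demo_topsis():
    # weights: one value per criteria column; impacts: '+' to maximise, '-' to minimise
    rank("input_data.csv", "1,1,1,2", "+,+,-,+", "topsis_result.csv")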
PypiClean
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/plugins/standard/GlfwPlugin.py
import os from nuitka import Options from nuitka.plugins.PluginBase import NuitkaPluginBase from nuitka.utils.FileOperations import getFileContentByLine from nuitka.utils.ModuleNames import ModuleName from nuitka.utils.Utils import isLinux, isMacOS, isWin32Windows # spell-checker: ignore glfw,opengl,osmesa,pyglfw,xwayland class NuitkaPluginGlfw(NuitkaPluginBase): """This class represents the main logic of the glfw plugin. This is a plugin to ensure that glfw platform specific backends are loading properly. This need to include the correct DLL and make sure it's used by setting an environment variable. """ # TODO: Maybe rename to opengl maybe plugin_name = "glfw" # Nuitka knows us by this name plugin_desc = "Required for OpenGL and glfw in standalone mode" @staticmethod def isAlwaysEnabled(): return True @classmethod def isRelevant(cls): """Check whether plugin might be required. Returns: True if this is a standalone compilation. """ return Options.isStandaloneMode() def getImplicitImports(self, module): # Dealing with OpenGL is a bit detailed, pylint: disable=too-many-branches if module.getFullName() == "OpenGL": opengl_infos = self.queryRuntimeInformationSingle( setup_codes="import OpenGL.plugins", value="[(f.name, f.import_path) for f in OpenGL.plugins.FormatHandler.all()]", ) # TODO: Filter by name. for _name, import_path in opengl_infos: yield ModuleName(import_path).getPackageName() for line in getFileContentByLine(module.getCompileTimeFilename()): line = line.partition("#")[0] if line.startswith("PlatformPlugin("): os_part, plugin_name_part = line[15:-1].split(",") os_part = os_part.strip("' ") plugin_name_part = plugin_name_part.strip(") '") plugin_name_part = plugin_name_part[: plugin_name_part.rfind(".")] if os_part == "nt": if isWin32Windows(): yield plugin_name_part elif os_part.startswith("linux"): if isLinux(): yield plugin_name_part elif os_part.startswith("darwin"): if isMacOS(): yield plugin_name_part elif os_part.startswith( ("posix", "osmesa", "egl", "x11", "wayland", "xwayland") ): if not isWin32Windows() and not isMacOS(): yield plugin_name_part else: self.sysexit( "Undetected OS specific glfw plugin '%s', please report bug for." % os_part ) def _getDLLFilename(self): glfw_info = self.queryRuntimeInformationMultiple( info_name="glfw_info", setup_codes="import glfw.library", values=(("dll_filename", "glfw.library.glfw._name"),), ) return glfw_info.dll_filename def getExtraDlls(self, module): if module.getFullName() == "glfw": dll_filename = self._getDLLFilename() yield self.makeDllEntryPoint( source_path=dll_filename, dest_path=os.path.join("glfw", os.path.basename(dll_filename)), package_name="glfw.library", reason="needed by 'glfw'", ) def createPreModuleLoadCode(self, module): if module.getFullName() == "glfw": dll_filename = self._getDLLFilename() code = r""" import os os.environ["PYGLFW_LIBRARY"] = os.path.join(__nuitka_binary_dir, "glfw", %r) """ % os.path.basename( dll_filename ) return ( code, "Setting 'PYGLFW_LIBRARY' environment variable for glfw to find platform DLL.", )
PypiClean
/HermesCache-0.10.0.tar.gz/HermesCache-0.10.0/hermes/__init__.py
import asyncio import base64 import functools import hashlib import inspect import os import pickle import types import typing import warnings import zlib from typing import Any, Callable, Coroutine, Dict, Iterable, Optional, Sequence, Tuple, Type, Union from .backend import AbstractBackend __all__ = 'Hermes', 'HermesError', 'Mangler', 'Cached', 'CachedCoro', 'Serialiser', 'Compressor' class Serialiser(typing.NamedTuple): '''Serialisation delegate.''' dumps: Callable[[Any], bytes] '''Serialise cache value.''' loads: Callable[[bytes], Any] '''Deserialise cache value.''' class Compressor(typing.NamedTuple): '''Compression delegate.''' compress: Callable[[bytes], bytes] '''Compress serialised cache value.''' decompress: Callable[[bytes], bytes] '''Decompress serialised cache value.''' decompressError: Union[Type[Exception], Tuple[Type[Exception], ...]] '''Decompression error(s) that indicate uncompressed payload.''' compressMinLength: int = 0 '''Minimal length of payload in bytes to trigger compression.''' class Mangler: '''Key manager responsible for creating keys, hashing and serialisation.''' prefix = 'cache' '''Prefix for cache and tag entries.''' serialiser = Serialiser(pickle.dumps, pickle.loads) '''Serialisation delegate.''' compressor = Compressor(zlib.compress, zlib.decompress, zlib.error, 100) '''Optional compression delegate.''' def hash(self, value: bytes) -> str: ''' Hash value. :return: base64 encoded MD5 hash of the value. ''' return base64.urlsafe_b64encode(hashlib.md5(value).digest()).strip(b'=').decode() def dumps(self, value) -> bytes: '''Serialise and conditionally compress value.''' result = self.serialiser.dumps(value) if self.compressor and len(result) >= self.compressor.compressMinLength: result = self.compressor.compress(result) return result def loads(self, value: bytes): '''Conditionally decompress and deserialise value.''' if self.compressor: try: value = self.compressor.decompress(value) except self.compressor.decompressError: # It is expected that the error indicates that the value is # shorter than compressMinLength pass return self.serialiser.loads(value) def nameEntry(self, fn: Callable, *args, **kwargs) -> str: ''' Return cache key for given callable and its positional and keyword arguments. Note how callable, ``fn``, is represented in the cache key: 1) a ``types.MethodType`` instance -> names of ``(module, class, method)`` 2) a ``types.FunctionType`` instance -> names of ``(module, function)`` 3) other callalbe objects with ``__name__`` -> name of ``(module, object)`` This means that if two function are defined dynamically in the same module with same names, like:: def createF1(): @cache def f(a, b): return a + b return f def createF2(): @cache def f(a, b): return a * b return f print(createF1()(1, 2)) print(createF2()(1, 2)) Both will return `3`, because cache keys will clash. In such cases you need to pass ``key`` with custom key function. It can also be that an object in case 3 doesn't have name, or its name isn't unique, then a ``nameEntry`` should be overridden with something that represents it uniquely, like ``repr(fn).rsplit(' at 0x', 1)[0]`` (address should be stripped so after Python process restart the cache can still be valid and usable). 
''' result = [self.prefix, 'entry'] if callable(fn): try: # types.MethodType result.extend([ fn.__module__, fn.__self__.__class__.__name__, # type: ignore[attribute-error] fn.__name__, ]) except AttributeError: try: # types.FunctionType and other object with __name__ result.extend([fn.__module__, fn.__name__]) except AttributeError: raise HermesError( 'fn is callable but its name is undefined, consider overriding Mangler.nameEntry' ) else: raise HermesError('fn is expected to be callable') arguments = args, tuple(sorted(kwargs.items())) result.append(self.hash(self.dumps(arguments))) return ':'.join(result) def nameTag(self, tag: str) -> str: '''Build fully qualified backend tag name.''' return ':'.join([self.prefix, 'tag', tag]) def mapTags(self, tagKeys: Iterable[str]) -> Dict[str, str]: '''Map tags to random values for seeding.''' rnd = os.urandom(4).hex() return {key: self.hash(':'.join((key, rnd)).encode()) for key in tagKeys} def hashTags(self, tagMap: Dict[str, str]) -> str: '''Hash tags of a cache entry for the entry key,''' values = tuple(zip(*sorted(tagMap.items())))[1] # sorted by key dict values return self.hash(':'.join(values).encode()) def nameLock(self, entryKey: str) -> str: ''' Create fully qualified backend lock key for the entry key. :param entryKey: Entry key to create a lock key for. If given entry key is already a colon-separated key name with first component equal to :attr:`prefix`, first to components are dropped. For instance: - ``foo`` → ``cache:lock:foo`` - ``cache:entry:fn:tagged:78d64ea049a57494`` → ``cache:lock:fn:tagged:78d64ea049a57494`` ''' parts = entryKey.split(':') if parts[0] == self.prefix: entryKey = ':'.join(parts[2:]) return ':'.join([self.prefix, 'lock', entryKey]) class Cached: '''Cache-point wrapper for callables and descriptors.''' _frontend: 'Hermes' ''' Hermes instance which provides backend and mangler instances, and TTL fallback value. ''' _callable: Callable ''' The decorated callable, stays ``types.FunctionType`` if a function is decorated, otherwise it is transformed to ``types.MethodType`` on the instance clone by descriptor protocol implementation. It can also be a method descriptor which is also transformed accordingly to the descriptor protocol (e.g. ``staticmethod`` and ``classmethod``). 
''' _isDescriptor: bool '''Flag defining if the callable is a method descriptor.''' _isMethod: bool '''Flag defining if the callable is a method.''' _ttl: Optional[int] '''Cache entry Time To Live for decorated callable.''' _keyFunc: Optional[Callable] '''Key creation function.''' _tags: Sequence[str] '''Cache entry tags for decorated callable.''' def __init__( self, frontend: 'Hermes', callable: Callable, *, ttl: Optional[int] = None, key: Optional[Callable] = None, tags: Sequence[str] = (), ): self._frontend = frontend self._ttl = ttl self._keyFunc = key self._tags = tags self._callable = callable self._isDescriptor = inspect.ismethoddescriptor(callable) self._isMethod = inspect.ismethod(callable) # preserve ``__name__``, ``__doc__``, etc functools.update_wrapper(self, callable) def _load(self, key): if self._tags: tagMap = self._frontend.backend.load(map(self._frontend.mangler.nameTag, self._tags)) if len(tagMap) != len(self._tags): return None else: key += ':' + self._frontend.mangler.hashTags(tagMap) return self._frontend.backend.load(key) def _save(self, key, value): if self._tags: namedTags = tuple(map(self._frontend.mangler.nameTag, self._tags)) tagMap = self._frontend.backend.load(namedTags) missingTags = set(namedTags) - set(tagMap.keys()) if missingTags: missingTagMap = self._frontend.mangler.mapTags(missingTags) self._frontend.backend.save(mapping = missingTagMap, ttl = None) tagMap.update(missingTagMap) assert len(self._tags) == len(tagMap) key += ':' + self._frontend.mangler.hashTags(tagMap) ttl = self._ttl if self._ttl is not None else self._frontend.ttl return self._frontend.backend.save({key: value}, ttl = ttl) def _remove(self, key): if self._tags: tagMap = self._frontend.backend.load(map(self._frontend.mangler.nameTag, self._tags)) if len(tagMap) != len(self._tags): return else: key += ':' + self._frontend.mangler.hashTags(tagMap) self._frontend.backend.remove(key) def _key(self, *args, **kwargs): keyFunc = self._keyFunc or self._frontend.mangler.nameEntry return keyFunc(self._callable, *args, **kwargs) def invalidate(self, *args, **kwargs): ''' Invalidate the cache entry. Invalidated entry corresponds to the wrapped callable called with given ``args`` and ``kwargs``. ''' self._remove(self._key(*args, **kwargs)) def __call__(self, *args, **kwargs): '''Get the value of the wrapped callable.''' key = self._key(*args, **kwargs) value = self._load(key) if value is None: with self._frontend.backend.lock(key): # it's better to read twice than lock every read value = self._load(key) if value is None: value = self._callable(*args, **kwargs) self._save(key, value) return value def __get__(self, instance, type): ''' Implements non-data descriptor protocol. The invocation happens only when instance method is decorated, so we can distinguish between decorated ``types.MethodType`` and ``types.FunctionType``. Python class declaration mechanics prevent a decorator from having awareness of the class type, as the function is received by the decorator before it becomes an instance method. How it works:: cache = hermes.Hermes() class Model: @cache def calc(self): return 42 m = Model() m.calc Last attribute access results in the call, ``calc.__get__(m, Model)``, where ``calc`` is instance of :class:`Cached` which decorates the original ``Model.calc``. Note, initially :class:`Cached` is created on decoration per class method, when class type is created by the interpreter, and is shared among all instances. 
Later, on attribute access, a copy is returned with bound ``_callable``, just like ordinary Python method descriptor works. For more details, `descriptor-protocol <http://docs.python.org/3/howto/descriptor.html#descriptor-protocol>`_. ''' if instance is not None and self._isDescriptor: return self._copy(self._callable.__get__(instance, type)) # type: ignore[attribute-error] elif instance is not None and not self._isMethod: return self._copy(types.MethodType(self._callable, instance)) else: return self def _copy(self, callable): ''' Create a shallow copy of self with ``_callable`` replaced to given instance. ''' boundCached = object.__new__(self.__class__) boundCached.__dict__ = self.__dict__.copy() boundCached._callable = callable return boundCached class CachedCoro(Cached): ''' Cache-point wrapper for coroutine functions. The implementation uses the default thread pool of ``asyncio`` to execute synchronous functions of the cache backend, and manage their (distributed) locks. ''' async def _run(self, fn, *args, **kwargs) -> Coroutine: '''' Run run given function or coroutine function. If ``fn`` is a coroutine function it's called and awaited. Otherwise it's run in the thread pool. ''' if inspect.iscoroutinefunction(fn): return await fn(*args, **kwargs) loop = asyncio.get_event_loop() return await loop.run_in_executor(None, functools.partial(fn, *args, **kwargs)) async def invalidate(self, *args, **kwargs): ''' Invalidate the cache entry. Invalidated entry corresponds to the wrapped coroutine function called with given ``args`` and ``kwargs``. ''' await self._run(super().invalidate, *args, **kwargs) async def __call__(self, *args, **kwargs): '''Get the value of the wrapped coroutine function's coroutine.''' key = self._key(*args, **kwargs) value = await self._run(self._load, key) if value is None: lock = self._frontend.backend.lock(key) await self._run(lock.acquire) try: value = await self._run(self._load, key) if value is None: value = await self._callable(*args, **kwargs) await self._run(self._save, key, value) finally: await self._run(lock.release) return value def cachedfactory(frontend: 'Hermes', fn, **kwargs) -> Cached: ''' Create a cache-point object from the callable. :argument frontend: Cache frontend instance. :argument fn: Must be coroutine function, callable or method descriptor. ''' isdescr = inspect.ismethoddescriptor(fn) if ( inspect.iscoroutinefunction(fn) or isdescr and inspect.iscoroutinefunction(getattr(fn, '__func__', None)) ): return CachedCoro(frontend, fn, **kwargs) elif callable(fn) or isdescr: return Cached(frontend, fn, **kwargs) else: raise HermesError( 'First positional argument must be coroutine function, callable or method descriptor' ) class Hermes: ''' Cache façade. :argument backend: Class or instance of cache backend. If a class is passed, keyword arguments of passed to :obj:`Hermes` constructor will be bypassed to the class' constructor. If the argument is omitted no-op backend will be be used. :argument mangler: Optional, typically of a subclass, mangler instance. :argument cachedfactory: Optional, a cache-point factory for functions and coroutines. :argument ttl: Default cache entry time-to-live. 
Usage:: import hermes.backend.redis cache = hermes.Hermes( hermes.backend.redis.Backend, ttl = 600, host = 'localhost', db = 1 ) @cache def foo(a, b): return a * b class Example: @cache(tags = ('math', 'power'), ttl = 1200) def bar(self, a, b): return a ** b @cache( tags = ('math', 'avg'), key = lambda fn, *args, **kwargs: 'avg:{0}:{1}'.format(*args), ) def baz(self, a, b): return (a + b) / 2.0 print(foo(2, 333)) example = Example() print(example.bar(2, 10)) print(example.baz(2, 10)) foo.invalidate(2, 333) example.bar.invalidate(2, 10) example.baz.invalidate(2, 10) cache.clean(['math']) # invalidate entries tagged 'math' cache.clean() # flush cache ''' backend: AbstractBackend '''Cache backend.''' mangler: Mangler '''Key manager responsible for creating keys, hashing and serialisation.''' cachedfactory: Callable[..., Cached] '''Cache-point callable object factory.''' ttl: int '''Default cache entry time-to-live.''' def __init__( self, backend: Union[Type[AbstractBackend], AbstractBackend] = AbstractBackend, *, mangler: Optional[Mangler] = None, cachedfactory: Callable[..., Cached] = cachedfactory, ttl: int = 3600, **backendconf ): self.ttl = ttl mangler = mangler or Mangler() assert isinstance(mangler, Mangler) self.mangler = mangler if isinstance(backend, AbstractBackend): if backendconf: warnings.warn('Backend options ignored because backend instance is passed') self.backend = backend elif isinstance(backend, type) and issubclass(backend, AbstractBackend): self.backend = backend(self.mangler, **backendconf) else: raise HermesError('Expected class or instance of AbstractBackend') # type: ignore assert callable(cachedfactory) self.cachedfactory = cachedfactory def __call__( self, *args, ttl: Optional[int] = None, tags: Sequence[str] = (), key: Optional[Callable] = None, ): ''' Wrap the callable in a cache-point instance. Decorator that caches method or function result. The following key arguments are optional: Bare decorator, ``@cache``, is supported as well as a call with keyword arguments ``@cache(ttl = 7200)``. :argument ttl: Seconds until entry expiration, otherwise instance's default is used. :argument tags: Cache entry tag list. :argument key: Lambda that provides custom key, otherwise :obj:`Mangler.nameEntry` is used. ''' if args: # @cache return self.cachedfactory(self, args[0]) else: # @cache() return functools.partial(self.cachedfactory, self, ttl = ttl, tags = tags, key = key) def clean(self, tags: Sequence[str] = ()): ''' Clean all, or tagged with given tags, cache entries. :argument tags: If this argument is omitted the call flushes all cache entries, otherwise only the entries tagged by given tags are flushed. ''' if tags: self.backend.remove(map(self.mangler.nameTag, tags)) else: self.backend.clean() class HermesError(Exception): '''Generic Hermes error.'''
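
# Illustrative usage sketch (not part of the original module). It shows the
# coroutine support provided by CachedCoro; it assumes the in-process dictionary
# backend is available as hermes.backend.dict.Backend, mirroring the redis
# example in the Hermes docstring, and the TTL and values are invented.
def _demo_async_cache():
    import asyncio
    import hermes.backend.dict

    cache = Hermes(hermes.backend.dict.Backend, ttl = 60)

    @cache
    async def fetch(x):
        return x * 2

    async def scenario():
        first = await fetch(21)     # computed and stored by CachedCoro
        second = await fetch(21)    # expected to be served from the backend
        await fetch.invalidate(21)  # removal also runs through the thread pool
        return first, second

    return asyncio.run(scenario())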
PypiClean
/GReNaDIne-0.0.21.tar.gz/GReNaDIne-0.0.21/docs/source/index.rst
.. grenadine documentation master file, created by sphinx-quickstart on Mon Jul 1 09:51:38 2019. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to grenadine's documentation! ===================================== .. image:: _static/logo_grenadine_white.png :width: 150 :align: center .. toctree:: :maxdepth: 2 :caption: Contents: grenadine grenadine.Preprocessing grenadine.Inference grenadine.Evaluation Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`
PypiClean
/FJUtils-0.0.16-py3-none-any.whl/fjutils/utils.py
import importlib.util import os import jax import msgpack from typing import List, Optional, Callable, Any from fjutils.checkpointing import StreamingCheckpointer from jax import numpy as jnp import numpy as np import json import re from jax.sharding import PartitionSpec as PS import flax from jax.interpreters import pxla from fjutils.easylm import with_sharding_constraint from flax.serialization import from_bytes, to_bytes, to_state_dict from flax.traverse_util import flatten_dict from fjutils.easylm import float_tensor_to_dtype def is_torch_available(): return True if importlib.util.find_spec('torch') is not None else False def match_partition_rules(rules, params): def get_partition_spec(name, leaf): if len(leaf.shape) == 0 or np.prod(leaf.shape) == 1: return PS() for rule, ps in rules: if re.search(rule, name) is not None: return ps raise ValueError(f'Partition rule not found for param: {name}') def tree_path_to_string(path): keys = [] for i, key in enumerate(path): if isinstance(key, jax.tree_util.SequenceKey): keys.append(str(key.idx)) elif isinstance(key, (jax.tree_util.DictKey, jax.tree_util.FlattenedIndexKey)): keys.append(str(key.key)) elif isinstance(key, jax.tree_util.GetAttrKey): keys.append(str(key.name)) else: keys.append(str(key)) return '/'.join(keys) return jax.tree_util.tree_map_with_path( lambda path, p: get_partition_spec(tree_path_to_string(path), p), params ) def count_num_params(_p): return sum(i.size for i in jax.tree_util.tree_flatten(flax.core.unfreeze(_p))[0]) def count_params(_p): print('\033[1;31mModel Contain : ', sum(i.size for i in jax.tree_util.tree_flatten(flax.core.unfreeze(_p))[0]) / 1e9, ' Billion Parameters') def names_in_mesh(*names): return set(names) <= set(pxla.thread_resources.env.physical_mesh.axis_names) def get_names(partition_specs): names = set() for item in partition_specs: if item is None: continue elif isinstance(item, str): names.add(item) return list(names) def with_sharding_constraint__a(x, partition_spec): names = get_names(partition_spec) if names_in_mesh(*names): x = with_sharding_constraint(x, partition_spec) return x def get_devices(tensor): return tensor.devices() def change_to_bf16(tensor): return tensor.astype(jnp.bfloat16) def change_to_fp16(tensor): return tensor.astype(jnp.float16) def change_to_fp32(tensor): return tensor.astype(jnp.float32) def change(tensor, device): return jax.device_put(tensor, device) def read_ckpt(path: [str, os.PathLike], shard_fns=None, add_extra_past_fix: list = None): tensors = {} with open(path, 'rb') as stream: unpacker = msgpack.Unpacker(stream, read_size=83886080, max_buffer_size=0) for key, value in unpacker: if add_extra_past_fix is not None: key = add_extra_past_fix + key key = tuple(key) tensor = from_bytes(None, value) if shard_fns is not None: tensor = shard_fns[key](tensor) tensors[key] = tensor return tensors def save_ckpt(train_state, path, gather_fns=None, float_dtype=None): train_state = to_state_dict(train_state) packer = msgpack.Packer() flattend_train_state = flatten_dict(train_state) if gather_fns is not None: gather_fns = flatten_dict(to_state_dict(gather_fns)) with open(path, "wb") as stream: for key, value in flattend_train_state.items(): if gather_fns is not None: value = gather_fns[key](value) value = float_tensor_to_dtype(value, float_dtype) stream.write(packer.pack((key, to_bytes(value)))) def match_keywords(string, ts, ns): for t in ts: if t not in string: return False for n in ns: if n in string: return False return True def load_and_convert_checkpoint(path, 
dtype=jnp.float16, transpose_needed: List[str] = ["kernel"], transpose_not_needed: List[str] = ['none'], select_params_field: bool = True): import torch _, flax_params = StreamingCheckpointer.load_trainstate_checkpoint('params::' + path) flax_params = flatten_dict(flax_params['params'], sep='.') if select_params_field else flatten_dict(flax_params, sep='.') torch_params = {} for key, tensor in flax_params.items(): if match_keywords(key, transpose_needed, transpose_not_needed): tensor = tensor.T tensor = float_tensor_to_dtype(tensor, dtype) torch_params[key] = torch.from_numpy(tensor) return torch_params def read_json(path): with open(path, "r") as stream: return json.load(stream) def write_json(text, path): with open(path, "w") as stream: json.dump(text, stream) def get_dataloader(dataset_or_huggingface_dataset_hub_id: Any, batch_size: int, num_epochs: int, select_hf_dataset_field='train', max_steps: int = None, max_length: int = 4096, dataset_hf_kwargs: dict = {}, collate_fn: Callable = None, shuffle: Optional[bool] = None, sampler=None, batch_sampler=None, num_workers: int = 0, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn=None, multiprocessing_context=None, generator=None, *, prefetch_factor: Optional[int] = None, persistent_workers: bool = False, pin_memory_device: str = ""): if collate_fn is None: def collate_fn(batch): rs = {} for key in batch[0].keys(): ssp = [jnp.array(f[key])[..., -max_length:] for f in batch] rs[key] = jnp.stack(ssp).reshape(-1, ssp[0].shape[-1]) return rs from torch.utils.data import DataLoader if isinstance(dataset_or_huggingface_dataset_hub_id, str): from datasets import load_dataset dataset = load_dataset(dataset_or_huggingface_dataset_hub_id, **dataset_hf_kwargs)[select_hf_dataset_field] else: dataset = dataset_or_huggingface_dataset_hub_id dataloader = DataLoader( dataset=dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=pin_memory, prefetch_factor=prefetch_factor, num_workers=num_workers, shuffle=shuffle, timeout=timeout, sampler=sampler, batch_sampler=batch_sampler, drop_last=drop_last, generator=generator, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device, multiprocessing_context=multiprocessing_context, worker_init_fn=worker_init_fn ) max_steps = num_epochs * len(dataloader) if max_steps is None else max_steps return dataloader, max_steps
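
# Illustrative usage sketch (not part of the original module). The rule regexes,
# parameter names, shapes and mesh axis names below are invented; it only
# demonstrates how match_partition_rules pairs regex rules with PartitionSpec
# objects over a parameter pytree.
def _demo_partition_rules():
    import numpy as np
    from jax.sharding import PartitionSpec as PS

    params = {
        "dense": {
            "kernel": np.zeros((8, 8)),
            "bias": np.zeros((8,)),
        }
    }
    rules = (
        ("dense/kernel", PS("fsdp", "mp")),
        ("bias", PS()),
        (".*", PS()),
    )
    return match_partition_rules(rules, params)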
PypiClean
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/hris/model/earning.py
import re # noqa: F401 import sys # noqa: F401 from typing import ( Optional, Union, List, Dict, ) from MergePythonSDK.shared.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, OpenApiModel, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from MergePythonSDK.shared.exceptions import ApiAttributeError from MergePythonSDK.shared.model_utils import import_model_by_name def lazy_import(): from MergePythonSDK.hris.model.earning_type_enum import EarningTypeEnum globals()['EarningTypeEnum'] = EarningTypeEnum class Earning(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ return (bool, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() defined_types = { 'id': (str, none_type,), # noqa: E501 'employee_payroll_run': (str, none_type, none_type,), # noqa: E501 'amount': (float, none_type, none_type,), # noqa: E501 'type': (EarningTypeEnum, str, none_type,), 'remote_was_deleted': (bool, none_type,), # noqa: E501 } return defined_types @cached_property def discriminator(): return None attribute_map = { 'id': 'id', # noqa: E501 'employee_payroll_run': 'employee_payroll_run', # noqa: E501 'amount': 'amount', # noqa: E501 'type': 'type', # noqa: E501 'remote_was_deleted': 'remote_was_deleted', # noqa: E501 } read_only_vars = { 'id', # noqa: E501 } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """Earning - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. 
snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) id (str): [optional] # noqa: E501 employee_payroll_run (str, none_type): [optional] # noqa: E501 amount (float, none_type): The amount earned.. [optional] # noqa: E501 type (bool, dict, float, int, list, str, none_type): The type of earning.. [optional] # noqa: E501 remote_was_deleted (bool): Indicates whether or not this object has been deleted by third party webhooks.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', True) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.employee_payroll_run = kwargs.get("employee_payroll_run", None) self.amount = kwargs.get("amount", None) self.type = kwargs.get("type", None) self.remote_was_deleted = kwargs.get("remote_was_deleted", None) # Read only properties self._id = kwargs.get("id", str()) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """Earning - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. 
When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) id (str): [optional] # noqa: E501 employee_payroll_run (str, none_type): [optional] # noqa: E501 amount (float, none_type): The amount earned.. [optional] # noqa: E501 type (bool, dict, float, int, list, str, none_type): The type of earning.. [optional] # noqa: E501 remote_was_deleted (bool): Indicates whether or not this object has been deleted by third party webhooks.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.employee_payroll_run: Union[str, none_type] = kwargs.get("employee_payroll_run", None) self.amount: Union[float, none_type] = kwargs.get("amount", None) self.type: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("type", None) self.remote_was_deleted: Union[bool] = kwargs.get("remote_was_deleted", bool()) # Read only properties self._id: Union[str] = kwargs.get("id", str()) # Read only property getters @property def id(self): return self._id
PypiClean
/Kreveik-0.6.0.tar.gz/Kreveik-0.6.0/kreveik/family/motifs.py
def motif_freqs(family, degree, exclusive=False, sorting=False, **kwargs):
    """
    Returns a list of motifs of the family.
    Takes every individual of a family, extracts its motif frequencies
    (see the network.motif.motif_freqs() function) and accumulates the counts
    over all individuals. When supplied with a motiflist, this function only
    searches for the motifs within that list. If no motiflist is supplied,
    the function generates all possible motifs with the specified number of
    nodes and resumes the calculation.

    Args:
    ----
    family: The family in which motif frequencies will be extracted.
    degree: the number of nodes of motifs in question.
    sorting: if True, will sort the returned list of motif frequencies.
    motiflist: an optional argument; if supplied, the search for motifs
        will be limited to that particular list.
    """
    from kreveik import network
    import copy
    import logging

    if 'motiflist' in kwargs:
        returned_motifs = copy.deepcopy(kwargs['motiflist'])
    else:
        if exclusive:
            returned_motifs = network.motif.exclusive_conn_motifs(degree)[:]
        else:
            returned_motifs = network.motif.all_conn_motifs(degree)[:]

    logging.info("Computing motif frequencies of the family")

    for networkf in family:
        returned_motifs = network.motif.motif_freqs(networkf, degree,
                                                    motiflist=returned_motifs)

    if sorting:
        return sorted(returned_motifs, key=lambda motif: motif[1], reverse=True)
    else:
        return returned_motifs


def relative_motif_freqs(network_family, degree, sorting=False, **kwargs):
    """
    Returns the motif frequencies of the family normalised by the total
    motif count, as a numpy array of [motif, relative frequency] pairs.
    """
    from kreveik import network
    import copy
    import logging
    import numpy as num

    if 'motiflist' in kwargs:
        returned_motifs = copy.deepcopy(kwargs['motiflist'])
    else:
        returned_motifs = network.motif.all_conn_motifs(degree)[:]

    logging.info("Computing relative motif frequencies of the family")

    family_motifs = motif_freqs(network_family, degree, motiflist=returned_motifs)[:]
    motif_counts = num.array([[family_motifs[i][1]] for i in range(len(family_motifs))])

    relative_freqs = []
    for i in range(len(family_motifs)):
        relative_freqs.append([family_motifs[i][0],
                               float(family_motifs[i][1]) / motif_counts.sum()])

    return num.array(relative_freqs)


def exclusive_motif_freqs(family, degree):
    """
    Work in Progress
    """
    from kreveik import network
    import copy
    import logging

    motifs = network.motif.exclusive_conn_motifs(degree)[:]

    for networkf in family:
        logging.info("Computing motif frequencies of the network " + str(networkf) + ".")
        returned_motifs = network.motif.motif_freqs(networkf, degree, exclusive=True)
        for i in range(len(motifs)):
            motifs[i][1] = motifs[i][1] + returned_motifs[i][1]

    return motifs


__all__ = ["motif_freqs", "relative_motif_freqs", "exclusive_motif_freqs"]
PypiClean
/netket-3.9.2.tar.gz/netket-3.9.2/netket/stats/mc_stats_old.py
from functools import partial import jax from jax import numpy as jnp import numpy as np from netket import jax as nkjax from netket.utils import config from . import mean as _mean from . import var as _var from . import total_size as _total_size from .mc_stats import Stats def _get_blocks(data, block_size): chain_length = data.shape[1] n_blocks = int(np.floor(chain_length / float(block_size))) return data[:, 0 : n_blocks * block_size].reshape((-1, block_size)).mean(axis=1) def _block_variance(data, l): blocks = _get_blocks(data, l) ts = _total_size(blocks) if ts > 0: return _var(blocks), ts else: return jnp.nan, 0 def _batch_variance(data): b_means = data.mean(axis=1) ts = _total_size(b_means) return _var(b_means), ts # this is not batch_size maybe? def statistics(data, batch_size=32): r""" Returns statistics of a given array (or matrix, see below) containing a stream of data. This is particularly useful to analyze Markov Chain data, but it can be used also for other type of time series. Assumes same shape on all MPI processes. Args: data (vector or matrix): The input data. It can be real or complex valued. * if a vector, it is assumed that this is a time series of data (not necessarily independent); * if a matrix, it is assumed that that rows :code:`data[i]` contain independent time series. Returns: Stats: A dictionary-compatible class containing the average (:code:`.mean`, :code:`["Mean"]`), variance (:code:`.variance`, :code:`["Variance"]`), the Monte Carlo standard error of the mean (:code:`error_of_mean`, :code:`["Sigma"]`), an estimate of the autocorrelation time (:code:`tau_corr`, :code:`["TauCorr"]`), and the Gelman-Rubin split-Rhat diagnostic (:code:`.R_hat`, :code:`["R_hat"]`). These properties can be accessed both the attribute and the dictionary-style syntax (both indicated above). The split-Rhat diagnostic is based on comparing intra-chain and inter-chain statistics of the sample and is thus only available for 2d-array inputs where the rows are independently sampled MCMC chains. In an ideal MCMC samples, R_hat should be 1.0. If it deviates from this value too much, this indicates MCMC convergence issues. Thresholds such as R_hat > 1.1 or even R_hat > 1.01 have been suggested in the literature for when to discard a sample. (See, e.g., Gelman et al., `Bayesian Data Analysis <http://www.stat.columbia.edu/~gelman/book/>`_, or Vehtari et al., `arXiv:1903.08008 <https://arxiv.org/abs/1903.08008>`_.) 
""" return _statistics(data, batch_size) @partial(jax.jit, static_argnums=1) def _statistics(data, batch_size): data = jnp.atleast_1d(data) if data.ndim == 1: data = data.reshape((1, -1)) if data.ndim > 2: raise NotImplementedError("Statistics are implemented only for ndim<=2") mean = _mean(data) variance = _var(data) ts = _total_size(data) bare_var = variance batch_var, n_batches = _batch_variance(data) l_block = max(1, data.shape[1] // batch_size) block_var, n_blocks = _block_variance(data, l_block) tau_batch = ((ts / n_batches) * batch_var / bare_var - 1) * 0.5 tau_block = ((ts / n_blocks) * block_var / bare_var - 1) * 0.5 batch_good = (tau_batch < 6 * data.shape[1]) * (n_batches >= batch_size) block_good = (tau_block < 6 * l_block) * (n_blocks >= batch_size) stat_dtype = nkjax.dtype_real(data.dtype) # if batch_good: # error_of_mean = jnp.sqrt(batch_var / n_batches) # tau_corr = jnp.max(0, tau_batch) # elif block_good: # error_of_mean = jnp.sqrt(block_var / n_blocks) # tau_corr = jnp.max(0, tau_block) # else: # error_of_mean = jnp.nan # tau_corr = jnp.nan # jax style def batch_good_err(args): batch_var, tau_batch, *_ = args error_of_mean = jnp.sqrt(batch_var / n_batches) tau_corr = jnp.clip(tau_batch, 0) return jnp.asarray(error_of_mean, dtype=stat_dtype), jnp.asarray( tau_corr, dtype=stat_dtype ) def block_good_err(args): _, _, block_var, tau_block = args error_of_mean = jnp.sqrt(block_var / n_blocks) tau_corr = jnp.clip(tau_block, 0) return jnp.asarray(error_of_mean, dtype=stat_dtype), jnp.asarray( tau_corr, dtype=stat_dtype ) def nan_err(args): return jnp.asarray(jnp.nan, dtype=stat_dtype), jnp.asarray( jnp.nan, dtype=stat_dtype ) def batch_not_good(args): batch_var, tau_batch, block_var, tau_block, block_good = args return jax.lax.cond( block_good, block_good_err, nan_err, (batch_var, tau_batch, block_var, tau_block), ) error_of_mean, tau_corr = jax.lax.cond( batch_good, batch_good_err, batch_not_good, (batch_var, tau_batch, block_var, tau_block, block_good), ) if n_batches > 1: N = data.shape[-1] if not config.netket_use_plain_rhat: # compute split-chain batch variance local_batch_size = data.shape[0] if N % 2 == 0: # split each chain in the middle, # like [[1 2 3 4]] -> [[1 2][3 4]] batch_var, _ = _batch_variance( data.reshape(2 * local_batch_size, N // 2) ) else: # drop the last sample of each chain for an even split, # like [[1 2 3 4 5]] -> [[1 2][3 4]] batch_var, _ = _batch_variance( data[:, :-1].reshape(2 * local_batch_size, N // 2) ) # V_loc = _np.var(data, axis=-1, ddof=0) # W_loc = _np.mean(V_loc) # W = _mean(W_loc) # # This approximation seems to hold well enough for larger n_samples W = variance R_hat = jnp.sqrt((N - 1) / N + batch_var / W) else: R_hat = jnp.nan res = Stats(mean, error_of_mean, variance, tau_corr, R_hat) return res
PypiClean
/Misago-0.36.1.tar.gz/Misago-0.36.1/misago/threads/participants.py
from django.contrib.auth import get_user_model from django.utils.translation import gettext as _ from ..notifications.tasks import notify_on_new_private_thread from .events import record_event from .models import Thread, ThreadParticipant User = get_user_model() def has_participants(thread): return thread.threadparticipant_set.exists() def make_participants_aware(user, target): if hasattr(target, "__iter__"): make_threads_participants_aware(user, target) else: make_thread_participants_aware(user, target) def make_threads_participants_aware(user, threads): threads_dict = {} for thread in threads: thread.participant = None threads_dict[thread.pk] = thread participants_qs = ThreadParticipant.objects.filter( user=user, thread_id__in=threads_dict.keys() ) for participant in participants_qs: participant.user = user threads_dict[participant.thread_id].participant = participant def make_thread_participants_aware(user, thread): thread.participants_list = [] thread.participant = None participants_qs = ThreadParticipant.objects.filter(thread=thread) participants_qs = participants_qs.select_related("user") for participant in participants_qs.order_by("-is_owner", "user__slug"): participant.thread = thread thread.participants_list.append(participant) if participant.user == user: thread.participant = participant return thread.participants_list def set_users_unread_private_threads_sync( users=None, participants=None, exclude_user=None ): users_ids = [] if users: users_ids += [u.pk for u in users] if participants: users_ids += [p.user_id for p in participants] if exclude_user: users_ids = filter(lambda u: u != exclude_user.pk, users_ids) if not users_ids: return User.objects.filter(id__in=set(users_ids)).update(sync_unread_private_threads=True) def set_owner(thread, user): ThreadParticipant.objects.set_owner(thread, user) def change_owner(request, thread, new_owner): ThreadParticipant.objects.set_owner(thread, new_owner) set_users_unread_private_threads_sync( participants=thread.participants_list, exclude_user=request.user ) if thread.participant and thread.participant.is_owner: record_event( request, thread, "changed_owner", { "user": { "id": new_owner.id, "username": new_owner.username, "url": new_owner.get_absolute_url(), } }, ) else: record_event(request, thread, "tookover") def add_participant(request, thread, new_participant): """adds single participant to thread, registers this on the event""" add_participants(request.user, thread, [new_participant]) if request.user == new_participant: record_event(request, thread, "entered_thread") else: record_event( request, thread, "added_participant", { "user": { "id": new_participant.id, "username": new_participant.username, "url": new_participant.get_absolute_url(), } }, ) def add_participants(user: User, thread: Thread, participants: list[User]): """ Add multiple participants to thread, set "recount private threads" flag on them notify them about being added to thread. 
""" ThreadParticipant.objects.add_participants(thread, participants) try: thread_participants = thread.participants_list except AttributeError: thread_participants = [] set_users_unread_private_threads_sync( users=participants, participants=thread_participants, exclude_user=user ) participants_ids = [ participant.id for participant in participants if participant.id != user.id ] if participants_ids: notify_on_new_private_thread.delay(user.id, thread.id, participants_ids) def remove_participant(request, thread, user): """remove thread participant, set "recound private threads" flag on user""" removed_owner = False remaining_participants = [] for participant in thread.participants_list: if participant.user == user: removed_owner = participant.is_owner else: remaining_participants.append(participant.user) set_users_unread_private_threads_sync(participants=thread.participants_list) if not remaining_participants: thread.delete() else: thread.threadparticipant_set.filter(user=user).delete() thread.subscription_set.filter(user=user).delete() if removed_owner: thread.is_closed = True # flag thread to close if request.user == user: event_type = "owner_left" else: event_type = "removed_owner" else: if request.user == user: event_type = "participant_left" else: event_type = "removed_participant" record_event( request, thread, event_type, { "user": { "id": user.id, "username": user.username, "url": user.get_absolute_url(), } }, )
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/attr/converters.py
from __future__ import absolute_import, division, print_function

from ._make import NOTHING, Factory


def optional(converter):
    """
    A converter that allows an attribute to be optional. An optional attribute
    is one which can be set to ``None``.

    :param callable converter: the converter that is used for non-``None``
        values.

    .. versionadded:: 17.1.0
    """

    def optional_converter(val):
        if val is None:
            return None
        return converter(val)

    return optional_converter


def default_if_none(default=NOTHING, factory=None):
    """
    A converter that allows one to replace ``None`` values by *default* or the
    result of *factory*.

    :param default: Value to be used if ``None`` is passed. Passing an instance
        of :class:`attr.Factory` is supported, however the ``takes_self`` option
        is *not*.
    :param callable factory: A callable that takes no parameters whose result
        is used if ``None`` is passed.

    :raises TypeError: If **neither** *default* nor *factory* is passed.
    :raises TypeError: If **both** *default* and *factory* are passed.
    :raises ValueError: If an instance of :class:`attr.Factory` is passed with
        ``takes_self=True``.

    .. versionadded:: 18.2.0
    """
    if default is NOTHING and factory is None:
        raise TypeError("Must pass either `default` or `factory`.")

    if default is not NOTHING and factory is not None:
        raise TypeError(
            "Must pass either `default` or `factory` but not both."
        )

    if factory is not None:
        default = Factory(factory)

    if isinstance(default, Factory):
        if default.takes_self:
            raise ValueError(
                "`takes_self` is not supported by default_if_none."
            )

        def default_if_none_converter(val):
            if val is not None:
                return val

            return default.factory()

    else:

        def default_if_none_converter(val):
            if val is not None:
                return val

            return default

    return default_if_none_converter
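

# --- Editor's usage sketch (not part of the original module) ----------------
# How these converters are typically attached to an attrs attribute. The
# top-level `attr` package import and the `Config` class are illustrative
# assumptions, not part of this file.
if __name__ == "__main__":
    import attr

    @attr.s
    class Config(object):
        port = attr.ib(converter=optional(int), default=None)               # None stays None, "8080" -> 8080
        name = attr.ib(converter=default_if_none("anonymous"), default=None)

    cfg = Config(port="8080", name=None)
    assert cfg.port == 8080
    assert cfg.name == "anonymous"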
PypiClean
/CQPy-0.1.2.tar.gz/CQPy-0.1.2/cqpy/_callback.py
from cqpy.events import dispatch, Event, EventType # 生命周期回调 def on_enable() -> int: return dispatch(Event(EventType.Enable)) def on_disable() -> int: return dispatch(Event(EventType.Disable)) # 消息事件 def on_private_msg(sub_type: int, msg_id: int, from_qq: int, msg: bytes, font: int) -> int: return dispatch( Event( EventType.PrivateMessage, sub_type=sub_type, msg_id=msg_id, from_qq=from_qq, msg=msg.decode('gb18030'), font=font, ) ) def on_group_msg(sub_type: int, msg_id: int, from_group: int, from_qq: int, from_anonymous_base64: bytes, msg: bytes, font: int) -> int: return dispatch( Event( EventType.GroupMessage, sub_type=sub_type, msg_id=msg_id, from_group=from_group, from_qq=from_qq, from_anonymous_base64=from_anonymous_base64.decode('gb18030'), msg=msg.decode('gb18030'), font=font, ) ) def on_discuss_msg(sub_type: int, msg_id: int, from_discuss: int, from_qq: int, msg: bytes, font: int) -> int: return dispatch( Event( EventType.DiscussMessage, msg_id=msg_id, from_discuss=from_discuss, from_qq=from_qq, msg=msg.decode('gb18030'), font=font, ) ) # 群组事件 def on_group_upload(sub_type: int, send_time: int, from_group: int, from_qq: int, file_base64: bytes) -> int: return dispatch( Event( EventType.GroupUpload, sub_type=sub_type, send_time=send_time, from_group=from_group, from_qq=from_qq, file_base64=file_base64.decode('gb18030'), ) ) def on_group_admin(sub_type: int, send_time: int, from_group: int, being_operate_qq: int) -> int: return dispatch( Event( EventType.GroupAdmin, sub_type=sub_type, send_time=send_time, from_group=from_group, being_operate_qq=being_operate_qq, ) ) def on_group_member_decrease(sub_type: int, send_time: int, from_group: int, from_qq: int, being_operate_qq: int) -> int: return dispatch( Event( EventType.GroupMemberDecrease, sub_type=sub_type, send_time=send_time, from_group=from_group, from_qq=from_qq, being_operate_qq=being_operate_qq, ) ) def on_group_member_increase(sub_type: int, send_time: int, from_group: int, from_qq: int, being_operate_qq: int) -> int: return dispatch( Event( EventType.GroupMemberIncrease, sub_type=sub_type, send_time=send_time, from_group=from_group, from_qq=from_qq, being_operate_qq=being_operate_qq, ) ) def on_group_ban(sub_type: int, send_time: int, from_group: int, from_qq: int, being_operate_qq: int, duration: int) -> int: return dispatch( Event( EventType.GroupBan, sub_type=sub_type, send_time=send_time, from_group=from_group, from_qq=from_qq, being_operate_qq=being_operate_qq, duration=duration, ) ) # 通知事件 def on_friend_add(sub_type: int, send_time: int, from_qq: int) -> int: return dispatch( Event( EventType.FriendAdd, sub_type=sub_type, send_time=send_time, from_qq=from_qq, ) ) # 请求 def on_friend_request(sub_type: int, send_time: int, from_qq: int, msg: bytes, response_flag: bytes) -> int: return dispatch( Event( EventType.FriendRequest, sub_type=sub_type, send_time=send_time, from_qq=from_qq, msg=msg.decode('gb18030'), response_flag=response_flag.decode('gb18030'), ) ) def on_group_request(sub_type: int, send_time: int, from_group: int, from_qq: int, msg: bytes, response_flag: bytes) -> int: return dispatch( Event( EventType.GroupRequest, sub_type=sub_type, send_time=send_time, from_group=from_group, from_qq=from_qq, msg=msg.decode('gb18030'), response_flag=response_flag.decode('gb18030'), ) )
PypiClean
/Landport-1.2.3.tar.gz/Landport-1.2.3/landport/utils/ttl.py
import time
import traceback
import logging

from tornado import ioloop

logger = logging.getLogger('simple')


class TTLManager(object):
    """
    Register objects for a period of time and periodically check whether
    they have timed out.
    """

    def __init__(self, timeout=20, ttl_type='ping', frequency=2000, detail=True):
        self._key_hash_time = {}
        self._id_hash_handler = {}
        self.timeout = timeout
        self.ttl_type = ttl_type
        self.frequency = frequency
        self.detail = detail
        self.uid_set = []
        self.seq = {}

    def update(self, key):
        str_key = self.ttl_type + str(id(key))
        self._key_hash_time[str_key] = time.time()
        self._id_hash_handler[str_key] = key
        self.uid_set.append(key.arg.get('uid'))
        self.seq[str_key] = 0

    def is_expire(self, key):
        distance = time.time() - self._key_hash_time[key]
        if distance > self.timeout:
            return 'expire'
        else:
            self.seq[key] += 1
            return distance

    def clean_expire(self):
        del_key = []
        for key in self._key_hash_time:
            distance = self.is_expire(key)
            if self.detail:
                logger.debug("Checker from %s: seq=%s uid=%s ttl=%s time=%s ms",
                             self.ttl_type,
                             self.seq[key],
                             self._id_hash_handler[key].arg.get('uid'),
                             self.timeout,
                             distance
                             )
            if distance == 'expire':
                handler = self._id_hash_handler[key]
                try:
                    handler.close_when_expire(ttl_type=self.ttl_type, code=107, reason='hb timeout')
                except:
                    logger.error(traceback.format_exc())
                else:
                    del_key.append(key)

        for key in del_key:
            self._remove(key)

    def _remove(self, key):
        if key in self._key_hash_time:
            del self._key_hash_time[key]
        if key in self._id_hash_handler:
            del self._id_hash_handler[key]
        if key in self.seq:
            del self.seq[key]

    def remove(self, handler):
        str_key = self.ttl_type + str(id(handler))
        self._remove(str_key)
        uid = handler.arg.get('uid')
        if uid in self.uid_set:
            self.uid_set.remove(uid)

    def start(self):
        ioloop.PeriodicCallback(self.clean_expire, self.frequency).start()
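

# --- Editor's usage sketch (not part of the original module) ----------------
# TTLManager expects handler-like objects that expose an `arg` dict (holding a
# 'uid') and a `close_when_expire(ttl_type=..., code=..., reason=...)` method.
# The dummy handler below is purely illustrative.
if __name__ == "__main__":
    class DummyHandler(object):
        def __init__(self, uid):
            self.arg = {'uid': uid}

        def close_when_expire(self, ttl_type, code, reason):
            logger.info("closing uid=%s because %s (%s)", self.arg['uid'], reason, code)

    ttl_manager = TTLManager(timeout=20, ttl_type='ping', frequency=2000)
    ttl_manager.update(DummyHandler(uid=42))   # register / refresh the heartbeat timestamp
    ttl_manager.start()                        # schedule periodic expiry checks on the running IOLoop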
PypiClean
/FHIRkit-0.1.2.tar.gz/FHIRkit-0.1.2/fhirkit/snomed/terminology.py
from __future__ import annotations import logging from urllib.parse import urlencode try: from typing import Literal except ImportError: from typing_extensions import Literal from pydantic import Field from typing import ClassVar, Optional, Union try: from typing import Annotated except ImportError: from typing_extensions import Annotated import requests from pydantic import Field, HttpUrl, ValidationError, parse_obj_as from fhirkit.Parameter import Parameters from fhirkit.Server import AbstractFHIRServer from fhirkit.TerminologyServer import AbstractFHIRTerminologyServer from fhirkit.ValueSet import VSExpansion, ValueSet from fhirkit.primitive_datatypes import URI, Code from fhirkit.elements import CodeableConcept, Coding from fhirkit.OperationOutcome import OperationOutcome, OperationOutcomeException class ExpandedValueset(ValueSet): status: Literal["active"] = Field("active") expansion: VSExpansion Response = Annotated[ Union[Parameters, OperationOutcome], Field(discriminator="resourceType") ] VSExpansionResponse = Annotated[ Union[ExpandedValueset, OperationOutcome], Field(discriminator="resourceType") ] class SCTFHIRTerminologyServer(AbstractFHIRTerminologyServer): DEFAULT_URL: ClassVar[ str ] = "https://browser.ihtsdotools.org/snowstorm/snomed-ct/fhir" DEFAULT_SERVER: ClassVar[Optional[SCTFHIRTerminologyServer]] = None RETRY_COUNT: int = 3 RETRY_PAUSE: int = 10 def __init__(self, base_url: Optional[Union[str, HttpUrl]] = None) -> None: base_url = base_url or self.DEFAULT_URL self._base_url = parse_obj_as(HttpUrl, base_url) @classmethod def default_server(cls): if cls.DEFAULT_SERVER is None: cls.DEFAULT_SERVER = SCTFHIRTerminologyServer() return cls.DEFAULT_SERVER def get_terminology_resource( self, resourceType: Optional[str], *, id: Optional[str] = None, url: Optional[URI] = None, ): return super().get_terminology_resource(resourceType, id=id, url=url) def get_resource( self, resourceType: str, *, id: Optional[str] = None, url: Optional[str] = None ): path = resourceType if id: path += "/" + id req_url = f"{self.base_url}/{path}" if url: req_url = url headers = {"Accept": "application/json", "Content-type": "application/json"} try: raw_response = requests.get(req_url, headers=headers) if raw_response.status_code != 200: logging.warn( "Received an error from the snowstorm server (%s) after sending the following request %s", raw_response.text, req_url, ) response = parse_obj_as(Response, raw_response.json()) except ValidationError: raise ConnectionError( f"Received a response that doesn't resemble a FHIR-resource. 
Please check if the server at {self.base_url} is a valid FHIR-server" ) except: raise RuntimeWarning( "Failed when calling {endpoint} on {base_url}".format( endpoint=path, base_url=self.base_url ) ) if isinstance(response, OperationOutcome): raise OperationOutcomeException(response) return response def valueset_expand(self, url: Union[str, HttpUrl], **kwargs): path = "ValueSet/$expand" page_size = 200 offset = 0 more_results_available = True while more_results_available: query = "url={url}&count={count}&offset={offset}".format( url=url, count=page_size, offset=offset ) query += "&" + urlencode(kwargs) if kwargs else "" req_url = f"{self.base_url}/{path}?{query}" headers = { "Accept": "application/json", } payload = "" try: raw_response = requests.request( "GET", req_url, data=payload, headers=headers ) if raw_response.status_code != 200: logging.warn( "Received an error from the snowstorm server (%s) after sending the following request %s", raw_response.text, req_url, ) response = parse_obj_as(VSExpansionResponse, raw_response.json()) if isinstance(response, OperationOutcome): raise OperationOutcomeException(response) yield response offset = response.expansion.offset + page_size remaining = max(response.expansion.total - offset, 0) except ValidationError: raise ConnectionError( f"Received a response that doesn't resemble a FHIR-server. Please check if the server at {self.base_url} is a valid FHIR-server. \n Response:\n\n {raw_response.json()}" ) except: raise raise RuntimeWarning( "Failed when calling {endpoint} on {base_url}".format( endpoint=path, base_url=self.base_url ) ) else: if remaining % (page_size * 10) == 0: logging.debug( f"{remaining} concepts remaining (total: {response.expansion.total} | page size: {page_size})." ) more_results_available = remaining > 0 def valueset_validate_code( self, url: Union[str, HttpUrl], code: Optional[Code] = None, display: Optional[str] = None, system: Optional[HttpUrl] = None, coding: Optional[Coding] = None, codeableConcept: Optional[CodeableConcept] = None, ) -> bool: assert ( code or coding or codeableConcept ), "At least a code, coding or codeableConcept must be given to validate." path = "ValueSet/$validate-code" parameters = [dict(name="url", valueUri=str(url))] if code and system: parameters.append(dict(name="code", valueCode=code)) parameters.append(dict(name="system", valueUri=str(system))) if display: parameters.append(dict(name="display", valueString=display)) if coding: parameters.append(dict(name="coding", valueCoding=coding)) if codeableConcept: parameters.append( dict(name="codeableConcept", valueCodeableConcept=codeableConcept) ) req_url = f"{self.base_url}/{path}" headersList = {"Accept": "application/json", "Content-type": "application/json"} payload = Parameters(parameter=parameters).json(exclude_none=True) try: raw_response = requests.request( "POST", req_url, data=payload, headers=headersList ) if raw_response.status_code != 200: logging.warn( "Received an error from the snowstorm server (%s) after sending the following request %s", raw_response.text, req_url, ) response = parse_obj_as(Response, raw_response.json()) except ValidationError: raise ConnectionError( f"Received a response that doesn't resemble a FHIR-server. 
Please check if the server at {self.base_url} is a valid FHIR-server" ) except: raise RuntimeWarning( "Failed when calling {endpoint} on {base_url}".format( endpoint=path, base_url=self.base_url ) ) if isinstance(response, OperationOutcome): raise OperationOutcomeException(response) return response def get_default_terminology_server(): return SCTFHIRTerminologyServer.default_server() DEFAULT_TERMINOLOGY_SERVER = SCTFHIRTerminologyServer()
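

# --- Editor's usage sketch (not part of the original module) ----------------
# Expanding an implicit SNOMED CT ValueSet page by page against the default
# public Snowstorm endpoint configured above. The ECL-based ValueSet URI is an
# illustrative assumption; `expansion.total` and `expansion.offset` are the
# fields the code above already relies on.
if __name__ == "__main__":
    server = SCTFHIRTerminologyServer.default_server()
    vs_url = "http://snomed.info/sct?fhir_vs=ecl/<<73211009"  # e.g. descendants of diabetes mellitus
    for page in server.valueset_expand(vs_url):
        print(page.expansion.total, page.expansion.offset)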
PypiClean
/DRSlib-DavidRodriguezSoaresCUI-0.8.0.tar.gz/DRSlib-DavidRodriguezSoaresCUI-0.8.0/src/DRSlib/execute.py
import multiprocessing import subprocess # nosec import sys import threading import time from collections import deque from os import PathLike from pathlib import Path from typing import Any, Dict, Sequence, Union import numpy as np import psutil from .os_detect import Os from .str_utils import ensure_quoted_on_space from .utils import assertTrue DETECTED_OS = Os() OS_IS_UNIX = ( DETECTED_OS.linux or DETECTED_OS.cygwin or DETECTED_OS.mac or DETECTED_OS.wsl ) COMMAND_TYPE = Union[ Union[str, bytes, PathLike[str], PathLike[bytes]], Sequence[Union[str, bytes, PathLike[str], PathLike[bytes]]], ] def execute(command: COMMAND_TYPE, shell: bool = False) -> Dict[str, str]: """Passes command to subprocess.Popen, retrieves stdout/stderr and performs error management. Returns a dictionnary containing stdX. Upon command failure, prints exception and returns empty dict.""" try: with subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell ) as process: # nosec # wait and retrieve stdout/err _stdout, _stderr = process.communicate() # handle text encoding issues and return stdX return { "stdout": _stdout.decode("utf8", errors="backslashreplace"), "stderr": _stderr.decode("utf8", errors="backslashreplace"), } except Exception as e: print(f"execute: Error while executing command '{command}' : {e}") # type: ignore[str-bytes-safe] raise def command_to_string(command: COMMAND_TYPE) -> str: """Returns string representation for command""" return " ".join( ensure_quoted_on_space(x) for x in [c if isinstance(c, str) else str(c) for c in command] ) # The following code was adapted from https://github.com/manzik/cmdbench def debug_execute(commands_list: COMMAND_TYPE): """Execute command and get resource usage statistics""" time_tmp_output_file = None if DETECTED_OS.linux: # Preprocessing: Wrap the target command around the GNU Time command time_tmp_output_file = Path("./.time.tmp") commands_list = [ "/usr/bin/time", "-o", time_tmp_output_file, "-v", ] + commands_list # START: Initialization # CPU cpu_times, disk_io_counters = None, None # Time series data # We don't need fast read access, we need fast insertion so we use deque sample_milliseconds, cpu_percentages, memory_values = ( deque([]), deque([]), deque([]), ) manager = multiprocessing.Manager() shared_process_dict_template = { "target_process_pid": -1, "execution_start": -1, "sample_milliseconds": sample_milliseconds, "cpu_percentages": cpu_percentages, "memory_values": memory_values, "memory_max": 0, "memory_perprocess_max": 0, "disk_io_counters": disk_io_counters, "cpu_times": cpu_times, } shared_process_dict = manager.dict(shared_process_dict_template) # Subprocess: For time series measurements # We need a non-blocking method to capture essential info (disk usage, cpu times) # and non-essential time-series info in parallel. 
# So we use either multiprocessing or threading to achieve this # Linux: Processes are faster than threads # Windows: Both are as fast but processes take longer to start if DETECTED_OS.linux: time_series_exec = multiprocessing.Process( target=collect_time_series, args=(shared_process_dict,) ) else: time_series_exec = threading.Thread( target=collect_time_series, args=(shared_process_dict,) ) time_series_exec.start() # Finally, run the command # Master process could be GNU Time running target command or the target command itself master_process = psutil.Popen( commands_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) execution_start = time.time() # p is always the target process to monitor p = get_target_process(master_process) shared_process_dict["execution_start"] = execution_start shared_process_dict["target_process_pid"] = p.pid # Wait for process to finish (time_series_exec and fixed_data_exec will be processing it in parallel) outdata, errdata = master_process.communicate() stdout, stderr = outdata.decode( sys.stdout.encoding, errors="backslashreplace" ), errdata.decode(sys.stderr.encoding, errors="backslashreplace") exection_end = time.time() # Done with the master process, wait for the parallel (threads or processes) to finish up time_series_exec.join() # Collect data from other (threads or processes) and store them cpu_times = shared_process_dict["cpu_times"] disk_io_counters = shared_process_dict["disk_io_counters"] memory_max = shared_process_dict["memory_max"] memory_perprocess_max = shared_process_dict["memory_perprocess_max"] sample_milliseconds = shared_process_dict["sample_milliseconds"] cpu_percentages = shared_process_dict["cpu_percentages"] memory_values = shared_process_dict["memory_values"] # Calculate and store proper values for cpu and disk # https://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_times if cpu_times is None: # macOS and Windows where cpu_times always returns 0 for children's cpu usage # Then we have calculated this info ourselves in other threads (collect_time_series, specifically) # grab and use them assertTrue( DETECTED_OS.mac or DETECTED_OS.windows, "cpu_used should not be None" ) cpu_user_time = shared_process_dict["children_user_cpu_time"] cpu_system_time = shared_process_dict["children_system_cpu_time"] else: cpu_user_time = cpu_times.user + cpu_times.children_user cpu_system_time = cpu_times.system + cpu_times.children_system cpu_total_time = cpu_user_time + cpu_system_time # Convert deques to numpy arrays sample_milliseconds = np.array(sample_milliseconds) cpu_percentages = np.array(cpu_percentages) memory_values = np.array(memory_values) # Collect info from GNU Time if it's linux gnu_times_dict = read_gnu_time(time_tmp_output_file) resource_usages = { "psutil": { # Data collected from psutil "cpu": { "total_time_s": cpu_total_time, "user_time_s": cpu_user_time, "system_time_s": cpu_system_time, }, "memory": { "max": memory_max, "max_perprocess": memory_perprocess_max, }, "process": {"execution_time_s": exection_end - execution_start}, }, "general": { # Info independent from GNU Time and psutil "command": command_to_string(commands_list), "stdout": stdout, "stderr": stderr, "exit_code": gnu_times_dict["Exit status"] if DETECTED_OS.linux else master_process.returncode, }, } add_disk_usage(disk_io_counters, resource_usages) add_gnu_time_usage(gnu_times_dict, resource_usages) return resource_usages def get_target_process(master_process): if DETECTED_OS.linux: # Only in linux, we target command will be GNU Time's child process # If we are 
using GNU Time and are on linux: # Wait for time to load the target process, then proceed # Depending on whether we are on linux or not # Wait for /usr/bin/time to start the target command while True: master_process_retcode = master_process.poll() if master_process_retcode is not None or not master_process.is_running(): break time_children = master_process.children(recursive=False) if len(time_children) > 0 and time_children[0] is not None: return time_children[0] else: # On other platforms, the main process will be the target process itself return master_process def add_gnu_time_usage(gnu_times_dict: dict, resource_usages: dict) -> None: """Adds gnu tume usage statistics to resource usage dictionnary""" if not DETECTED_OS.linux: return resource_usages["gnu_time"] = { "cpu": { "user_time": gnu_times_dict["User time (seconds)"], "system_time": gnu_times_dict["System time (seconds)"], "total_time": gnu_times_dict["User time (seconds)"] + gnu_times_dict["System time (seconds)"], }, "memory": { "max_perprocess": gnu_times_dict["Maximum resident set size (kbytes)"] * 1024, }, "disk": { # https://stackoverflow.com/a/42127533 "file_system_inputs": gnu_times_dict["File system inputs"] * 512, "file_system_outputs": gnu_times_dict["File system outputs"] * 512, }, "process": { "execution_time": gnu_times_dict[ "Elapsed (wall clock) time (h:mm:ss or m:ss)" ] }, } resource_usages["gnu_time_results"] = gnu_times_dict def add_disk_usage(disk_io_counters, resource_usages) -> None: """Adds disk usage statistics to resource usage dictionnary""" if DETECTED_OS.mac or disk_io_counters is None: return io = { "read_bytes": disk_io_counters.read_bytes, "write_bytes": disk_io_counters.write_bytes, "read_count": disk_io_counters.read_count, "write_count": disk_io_counters.write_count, } if DETECTED_OS.linux: io["read_chars"] = disk_io_counters.read_chars io["write_chars"] = disk_io_counters.write_chars if DETECTED_OS.windows: io["other_count"] = disk_io_counters.other_count io["other_bytes"] = disk_io_counters.other_bytes resource_usages["psutil"]["disk"] = {"io_counters": io} def read_gnu_time(time_tmp_output_file: Path) -> Dict[str, Any]: """Read GNU Time command's output and returns it parsed into a python dictionary""" if not DETECTED_OS.linux: return {} assertTrue( time_tmp_output_file is not None and time_tmp_output_file.exists(), "Expected file {} is None or doesn't exist", time_tmp_output_file, ) gnu_times_lines = [ line.strip() for line in time_tmp_output_file.read_text(encoding="utf8").splitlines() ] time_tmp_output_file.unlink() gnu_times_dict = {} for gnu_times_line in gnu_times_lines: tokens = list(map(lambda token: token.strip(), gnu_times_line.rsplit(": ", 1))) if len(tokens) < 2: continue key = tokens[0] value = tokens[1].replace("?", "0") gnu_times_dict[key] = value # We need a conversion for elapsed time from time format to seconds gnu_time_elapsed_wall_clock_key = "Elapsed (wall clock) time (h:mm:ss or m:ss)" gnu_times_dict[gnu_time_elapsed_wall_clock_key] = str( get_sec(gnu_times_dict[gnu_time_elapsed_wall_clock_key]) ) # And another conversion for cpu utilization percentage string gnu_time_job_cpu_percent = "Percent of CPU this job got" gnu_times_dict[gnu_time_job_cpu_percent] = float( gnu_times_dict[gnu_time_job_cpu_percent].replace("%", "") ) # Convert all gnu time output's int values to int and float values to float for key, value in gnu_times_dict.items(): gnu_times_dict[key] = cast_number(value) return gnu_times_dict def collect_time_series(shared_process_dict): while 
shared_process_dict["target_process_pid"] == -1: pass p = psutil.Process(shared_process_dict["target_process_pid"]) execution_start = shared_process_dict["execution_start"] sample_milliseconds = shared_process_dict["sample_milliseconds"] cpu_percentages = shared_process_dict["cpu_percentages"] memory_values = shared_process_dict["memory_values"] memory_perprocess_max = 0 memory_max = 0 # Children that we are processing # Set for faster "in" operation monitoring_process_children_set = set() # List for actual process access monitoring_process_children = [] # If we were able to access the process info at least once without access denied error had_permission = False # For macOS and Windows. Will be used for final user and system cpu time calculation children_cpu_times = [] while True: # retcode would be None while subprocess is running if not p.is_running(): break try: time_from_monitoring_start = time.time() - execution_start cpu_percentage = p.cpu_percent() # http://grodola.blogspot.com/2016/02/psutil-4-real-process-memory-and-environ.html memory_usage_info = p.memory_info() memory_usage = memory_usage_info.rss memory_perprocess_max = max(memory_perprocess_max, memory_usage) current_children = p.children(recursive=True) for child in current_children: with child.oneshot(): child_memory_usage_info = child.memory_info() child_memory_usage = child_memory_usage_info.rss memory_usage += child_memory_usage memory_perprocess_max = max( memory_perprocess_max, child_memory_usage ) # We need to get cpu_percentage() only for children existing for at list one iteration # Calculate CPU usage for children we have been monitoring if child in monitoring_process_children_set: child_index = monitoring_process_children.index(child) target_child_process = monitoring_process_children[child_index] if ( not DETECTED_OS.linux ): # psutil calculates children usage for us on linux. Otherwise we save the values ourselved children_cpu_times[ child_index ] = target_child_process.cpu_times() child_cpu_usage = target_child_process.cpu_percent() cpu_percentage += child_cpu_usage # Add children not already in our monitoring_process_children else: monitoring_process_children_set.add(child) monitoring_process_children.append(child) children_cpu_times.append( (0, 0, 0, 0) ) # Placeholder; almost the same shape as psutil.pcputimes memory_max = max(memory_max, memory_usage) sample_milliseconds.append(time_from_monitoring_start) cpu_percentages.append(cpu_percentage) memory_values.append(memory_usage) had_permission = True except psutil.AccessDenied as access_denied_error: # Same reasoning as usage in the collect_fixed_data function if OS_IS_UNIX: if had_permission: continue print("Root access is needed for monitoring the target command.") raise access_denied_error except psutil.NoSuchProcess: # The process might end while we are measuring resources pass except Exception as e: raise e # psutil calculates children usage for us on linux. Otherwise we calculate and pass it to the main thread. 
if not DETECTED_OS.linux: children_user_cpu_time = 0 children_system_cpu_time = 0 for cpu_time in children_cpu_times: children_user_cpu_time += cpu_time[0] children_system_cpu_time += cpu_time[1] shared_process_dict["children_user_cpu_time"] = children_user_cpu_time shared_process_dict["children_system_cpu_time"] = children_system_cpu_time shared_process_dict["memory_max"] = memory_max shared_process_dict["memory_perprocess_max"] = memory_perprocess_max shared_process_dict["sample_milliseconds"] = sample_milliseconds shared_process_dict["cpu_percentages"] = cpu_percentages shared_process_dict["memory_values"] = memory_values def get_sec(time_str): """Conversion of time format (hh:mm:ss or mm:ss) to seconds""" secs = 0 time_decimal = 0 time_decimal_start_ind = -1 if "." in time_str: time_decimal_start_ind = time_str.index(".") if time_decimal_start_ind > -1: time_decimal = float("0" + time_str[time_decimal_start_ind:]) time_str = time_str[:time_decimal_start_ind] time_tokens = time_str.split(":") time_tokens.reverse() for token_ind, time_token in enumerate(time_tokens): secs += int(time_token) * 60**token_ind return secs + time_decimal
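

# --- Editor's usage sketch (not part of the original module) ----------------
# `execute` returns the decoded stdout/stderr of a command; `debug_execute`
# additionally samples CPU/memory/disk usage. The command below is illustrative.
if __name__ == "__main__":
    out = execute([sys.executable, "-c", "print('hello')"])
    print(out["stdout"].strip())

    usage = debug_execute([sys.executable, "-c", "print('hello')"])
    print(usage["general"]["exit_code"], usage["psutil"]["cpu"]["total_time_s"])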
PypiClean
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/nodes/VariableReleaseNodes.py
from nuitka.ModuleRegistry import getOwnerFromCodeName from .NodeBases import StatementBase class StatementReleaseVariableBase(StatementBase): """Releasing a variable. Just release the value, which of course is not to be used afterwards. Typical code: Function exit user variables, try/finally release of temporary variables. """ __slots__ = "variable", "variable_trace" def __init__(self, variable, source_ref): StatementBase.__init__(self, source_ref=source_ref) self.variable = variable self.variable_trace = None @staticmethod def isStatementReleaseVariable(): return True def finalize(self): del self.variable del self.variable_trace del self.parent def getDetails(self): return {"variable": self.variable} def getDetailsForDisplay(self): return { "variable_name": self.variable.getName(), "owner": self.variable.getOwner().getCodeName(), } @classmethod def fromXML(cls, provider, source_ref, **args): assert cls is makeStatementReleaseVariable, cls owner = getOwnerFromCodeName(args["owner"]) assert owner is not None, args["owner"] variable = owner.getProvidedVariable(args["variable_name"]) return cls(variable=variable, source_ref=source_ref) def getVariable(self): return self.variable def getVariableTrace(self): return self.variable_trace def setVariable(self, variable): self.variable = variable def computeStatement(self, trace_collection): self.variable_trace = trace_collection.getVariableCurrentTrace(self.variable) if self.variable_trace.mustNotHaveValue(): return ( None, "new_statements", "Uninitialized %s is not released." % (self.variable.getDescription()), ) escape_desc = self.variable_trace.getReleaseEscape() assert escape_desc is not None, self.variable_trace if escape_desc.isControlFlowEscape(): # Any code could be run, note that. trace_collection.onControlFlowEscape(self) # TODO: We might be able to remove ourselves based on the trace # we belong to. return self, None, None @staticmethod def mayRaiseException(exception_type): # By default, __del__ is not allowed to raise an exception. return False class StatementReleaseVariableTemp(StatementReleaseVariableBase): kind = "STATEMENT_RELEASE_VARIABLE_TEMP" class StatementReleaseVariableLocal(StatementReleaseVariableBase): kind = "STATEMENT_RELEASE_VARIABLE_LOCAL" class StatementReleaseVariableParameter(StatementReleaseVariableLocal): kind = "STATEMENT_RELEASE_VARIABLE_PARAMETER" def computeStatement(self, trace_collection): if self.variable.getOwner().isAutoReleaseVariable(self.variable): return ( None, "new_statements", "Original parameter variable value of '%s' is not released." % self.variable.getName(), ) return StatementReleaseVariableLocal.computeStatement(self, trace_collection) def makeStatementReleaseVariable(variable, source_ref): if variable.isTempVariable(): return StatementReleaseVariableTemp(variable=variable, source_ref=source_ref) elif variable.isParameterVariable(): return StatementReleaseVariableParameter( variable=variable, source_ref=source_ref ) else: return StatementReleaseVariableLocal(variable=variable, source_ref=source_ref) def makeStatementsReleaseVariables(variables, source_ref): return tuple( makeStatementReleaseVariable(variable=variable, source_ref=source_ref) for variable in variables )
PypiClean
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/interface/files/files.py
# General Imports import os import yaml # Project Imports from muphyn.packages.core.base import LogManager from muphyn.packages.interface.files.abstract_exporter import AbstractExporter from muphyn.packages.interface.files.abstract_importer import AbstractImporter from muphyn.packages.interface.files.simulation_files.simulation_exporter import SimulationExporter from muphyn.packages.interface.files.simulation_files.simulation_importer import SimulationsImporter from muphyn.packages.interface.models.editable_models.abstract_editable_model import AbstractEditableModel from muphyn.packages.interface.models.editable_models.box_code_model import BoxCodeModel from muphyn.packages.interface.models.editable_models.box_composite_model import BoxCompositeModel from muphyn.packages.interface.models.editable_models.scheduler_model import SchedulerModel from muphyn.packages.interface.models.editable_models.simulation_model import SimulationModel from muphyn.packages.interface.editors.abstract_editor import AbstractEditor #----------------------------------- # Functions #----------------------------------- def load (path : str) -> AbstractEditableModel : """Permet de charger le modèle se trouvant au chemin passé en paramètre.""" importer : AbstractImporter = None with open(path) as file_data : yaml_data = yaml.load(file_data, Loader = yaml.FullLoader) name = os.path.basename(path) path = path[:-name.__len__()] if name.endswith('.yaml') : name = name[:-('.yaml'.__len__())] if 'simulation' in yaml_data : importer = SimulationsImporter() data = yaml_data['simulation'] if importer is None : return None return importer.open(data, path, name) def save (model : AbstractEditableModel, path : str) -> bool : """Permet de sauvegarder le modèle au chemin passé en paramètre.""" exporter : AbstractExporter = None if isinstance(model, SimulationModel): exporter = SimulationExporter() elif isinstance(model, BoxCompositeModel) : LogManager().debug('Save box composite !') raise Exception('no exporter found for box composite model') elif isinstance(model, SchedulerModel) : LogManager().debug('Save scheduler !') raise Exception('no exporter found for scheduler model') elif isinstance(model, BoxCodeModel) : LogManager().debug('Save box code !') raise Exception('no exporter found for box code model') return exporter.save(model, path) def export (editor : AbstractEditor, argument : str) : """Permet d'exporter l'éditeur sous une forme voulue."""
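

# --- Editor's usage sketch (not part of the original module) ----------------
# Round-tripping a simulation file with the helpers above. The file paths are
# illustrative assumptions; only '.yaml' simulation files are currently handled.
if __name__ == "__main__":
    simulation = load("examples/my_simulation.yaml")
    if simulation is not None:
        save(simulation, "examples/my_simulation_copy.yaml")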
PypiClean
/Dero-0.15.0-py3-none-any.whl/dero/time.py
import math, time, datetime, timeit def time_elapsed(seconds): ''' Takes an amount of time in seconds and converts it into how a human would say it. Required Options: seconds: time in number of seconds (raw number, int or float). ''' if seconds > 60: #if this is greater than a minute if seconds > 60 * 60: #if this is greater than an hour if seconds > 60 * 60 * 24: #if this is greater than a day if seconds > 60 * 60 * 24 * 30: #if this is greater than a month (approx.): months = math.trunc(seconds / (60 * 60 * 24 *30)) seconds -= months * (60 * 60 * 24 * 30) days = math.trunc(seconds / (60 * 60 * 24)) seconds -= days * (60 * 60 * 24) hours = math.trunc(seconds / (60 * 60)) seconds -= hours * (60 * 60) minutes = math.trunc(seconds / 60) seconds -= minutes * 60 seconds = math.trunc(seconds) print("Operation took {} months, {} days, {} hours, {} minutes, and {} seconds.".format(months, days, hours, minutes, seconds)) else: days = math.trunc(seconds / (60 * 60 * 24)) seconds -= days * (60 * 60 * 24) hours = math.trunc(seconds / (60 * 60)) seconds -= hours * (60 * 60) minutes = math.trunc(seconds / 60) seconds -= minutes * 60 seconds = math.trunc(seconds) print("Operation took {} days, {} hours, {} minutes, and {} seconds.".format(days, hours, minutes, seconds)) else: #if this is greater than an hour but less than a day hours = math.trunc(seconds / (60 * 60)) seconds -= hours * (60 * 60) minutes = math.trunc(seconds / 60) seconds -= minutes * 60 seconds = math.trunc(seconds) print("Operation took {} hours, {} minutes, and {} seconds.".format(hours, minutes, seconds)) else: #if this is greater than a minute but less than an hour minutes = math.trunc(seconds / 60) seconds -= minutes * 60 seconds = math.trunc(seconds) print("Operation took {} minutes and {} seconds.".format(minutes, seconds)) else: #if this is less than a minute seconds = math.trunc(seconds) print("Operation took {} seconds.".format(seconds)) def estimate_time(length,i,start_time,output=True): ''' Returns the estimate of when a looping operation will be finished. HOW TO USE: This function goes at the end of the loop to be timed. Outside of this function at the beginning of the loop, you must start a timer object as follows: start_time = timeit.default_timer() So the entire loop will look like this: my_start_time = timeit.default_timer() for i, item in enumerate(my_list): #Do loop stuff here estimate_time(len(my_list),i,my_start_time) REQUIRED OPTIONS: length: total number of iterations for the loop i: iterator for the loop start_time: timer object, to be started outside of this function (SEE ABOVE) OPTIONAL OPTIONS: output: specify other than True to suppress printing estimated time. Use this if you want to just store the time for some other use or custom output. The syntax then is as follows: my_start_time = timeit.default_timer() for i, item in enumerate(my_list): #Do loop stuff here my_timer = estimate_time(len(my_list),i,my_start_time) print("I like my output sentence better! Here's the estimate: {}".format(my_timer)) ''' avg_time = (timeit.default_timer() - start_time)/(i + 1) loops_left = length - (i + 1) est_time_remaining = avg_time * loops_left est_finish_time = datetime.datetime.now() + datetime.timedelta(0,est_time_remaining) if output == True: print("Estimated finish time: {}. Completed {}/{}, ({:.0%})".format(est_finish_time, i, length, i/length), end="\r") return est_finish_time def increment_dates(start_date,end_date,frequency='a'): ''' Returns a list of dates inbetween start and end dates. 
start_date and end_date should be in one of the following date formats: mm/dd/yyyy, mm/yyyy, yyyy Dates should be frequency should be a single letter, 'a' for annual, 'm' for monthly, 'w' for weekly, and 'd' for daily ''' #Find number of slashes to determine how to parse date number_of_slashes = [] number_of_slashes.append(start_date.count('/')) number_of_slashes.append(end_date.count('/')) date_formats = ['%Y','%Y'] #set container for date formats for i, number in enumerate(number_of_slashes): if number == 0: #no slashes means interpret as year pass #already set as year in container if number == 1: #one slash means interpret as month/year date_formats[i] = '%m/%Y' if number == 2: #one slash means interpret as month/year date_formats[i] = '%m/%d/%Y' start = datetime.datetime.strptime(start_date, date_formats[0]).date() end = datetime.datetime.strptime(end_date, date_formats[1]).date() delta = end - start number_of_years = end.year - start.year number_of_months = end.month - start.month number_of_days = end.day - start.day if frequency == 'd': number_of_periods = delta.days + 1 if frequency == 'w': number_of_periods = math.ceil((delta.days + 1)/7) if frequency == 'a': number_of_periods = math.ceil((delta.days + 1)/365) if frequency == 'm': number_of_periods = math.ceil((delta.days + 1)/(365/12)) outlist = [] for period in range(number_of_periods): if frequency == 'd': new_date = start + datetime.timedelta(days=period) outlist.append(str(new_date.month) + '/' + str(new_date.day) + '/' + str(new_date.year)) if frequency == 'w': new_date = start + datetime.timedelta(weeks=period) outlist.append(str(new_date.month) + '/' + str(new_date.day) + '/' + str(new_date.year)) if frequency == 'a': outlist.append(start.year + period) if frequency == 'm': new_period = period - 1 current_month = (start.month + new_period) current_year = start.year years_passed = math.floor(current_month/12) current_year += years_passed current_month -= 12 * years_passed - 1 if current_month > 12: pass outlist.append(str(current_month) + '/' + str(current_year)) return outlist
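

# --- Editor's usage sketch (not part of the original module) ----------------
# Monthly dates between two month/year endpoints; the output strings follow the
# formatting built inside increment_dates above (no zero padding on the month).
if __name__ == "__main__":
    months = increment_dates('01/2020', '04/2020', frequency='m')
    print(months)   # ['1/2020', '2/2020', '3/2020', '4/2020']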
PypiClean
/DBSync-0.1.1.tar.gz/DBSync-0.1.1/dbsync/generators/peewee.py
#python -m pwiz -e mysql -H 10.0.2.103 -p 4005 -u readonly -P 17zuoye -t UCT_USER -i HomeworkSitter
#!/usr/bin/env python
import datetime
from optparse import OptionParser
import sys

from peewee import *
from peewee import print_
from peewee import __version__ as peewee_version
from playhouse.reflection import *

TEMPLATE = """from peewee import *
from ..base_model import BaseModel

database = %s('%s', **%s)

class UnknownField(object):
    pass
"""

DATABASE_ALIASES = {
    MySQLDatabase: ['mysql', 'mysqldb'],
    PostgresqlDatabase: ['postgres', 'postgresql'],
    SqliteDatabase: ['sqlite', 'sqlite3'],
}

DATABASE_MAP = dict((value, key)
                    for key in DATABASE_ALIASES
                    for value in DATABASE_ALIASES[key])

def make_introspector(database_type, database_name, **kwargs):
    if database_type not in DATABASE_MAP:
        err('Unrecognized database, must be one of: %s' %
            ', '.join(DATABASE_MAP.keys()))
        sys.exit(1)

    schema = kwargs.pop('schema', None)
    DatabaseClass = DATABASE_MAP[database_type]
    db = DatabaseClass(database_name, **kwargs)
    return Introspector.from_database(db, schema=schema)

def print_models(introspector, tables=None, preserve_order=False):
    database = introspector.introspect()

    print_(TEMPLATE % (
        introspector.get_database_class().__name__,
        introspector.get_database_name(),
        repr(introspector.get_database_kwargs())))

    def _print_table(table, seen, accum=None):
        accum = accum or []
        foreign_keys = database.foreign_keys[table]
        for foreign_key in foreign_keys:
            dest = foreign_key.dest_table

            # In the event the destination table has already been pushed
            # for printing, then we have a reference cycle.
            if dest in accum and table not in accum:
                print_('# Possible reference cycle: %s' % dest)

            # If this is not a self-referential foreign key, and we have
            # not already processed the destination table, do so now.
            if dest not in seen and dest not in accum:
                seen.add(dest)
                if dest != table:
                    _print_table(dest, seen, accum + [table])

        print_('class %s(BaseModel):' % database.model_names[table])
        columns = database.columns[table].items()
        if not preserve_order:
            columns = sorted(columns)
        primary_keys = database.primary_keys[table]
        for name, column in columns:
            skip = all([
                name in primary_keys,
                name == 'id',
                len(primary_keys) == 1,
                column.field_class in introspector.pk_classes])
            if skip:
                continue
            if column.primary_key and len(primary_keys) > 1:
                # If we have a CompositeKey, then we do not want to explicitly
                # mark the columns as being primary keys.
column.primary_key = False print_(' %s' % column.get_field()) print_('') print_(' class Meta:') print_(' db_table = \'%s\'' % table) if introspector.schema: print_(' schema = \'%s\'' % introspector.schema) if len(primary_keys) > 1: pk_field_names = sorted([ field.name for col, field in columns if col in primary_keys]) pk_list = ', '.join("'%s'" % pk for pk in pk_field_names) print_(' primary_key = CompositeKey(%s)' % pk_list) print_('') seen.add(table) seen = set() for table in sorted(database.model_names.keys()): if table not in seen: if not tables or table in tables: _print_table(table, seen) def print_header(cmd_line, introspector): timestamp = datetime.datetime.now() print_('# Code generated by:') print_('# python -m pwiz %s' % cmd_line) print_('# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p')) print_('# Database: %s' % introspector.get_database_name()) print_('# Peewee version: %s' % peewee_version) print_('') def err(msg): sys.stderr.write('\033[91m%s\033[0m\n' % msg) sys.stderr.flush() def get_option_parser(): parser = OptionParser(usage='usage: %prog [options] database_name') ao = parser.add_option ao('-H', '--host', dest='host') ao('-p', '--port', dest='port', type='int') ao('-u', '--user', dest='user') ao('-P', '--password', dest='password') engines = sorted(DATABASE_MAP) ao('-e', '--engine', dest='engine', default='postgresql', choices=engines, help=('Database type, e.g. sqlite, mysql or postgresql. Default ' 'is "postgresql".')) ao('-s', '--schema', dest='schema') ao('-t', '--tables', dest='tables', help=('Only generate the specified tables. Multiple table names should ' 'be separated by commas.')) ao('-i', '--info', dest='info', action='store_true', help=('Add database information and other metadata to top of the ' 'generated file.')) ao('-o', '--preserve-order', action='store_true', dest='preserve_order', help='Model definition column ordering matches source table.') return parser def get_connect_kwargs(options): ops = ('host', 'port', 'user', 'password', 'schema') return dict((o, getattr(options, o)) for o in ops if getattr(options, o)) if __name__ == '__main__': raw_argv = sys.argv parser = get_option_parser() options, args = parser.parse_args() if options.preserve_order: try: from collections import OrderedDict except ImportError: err('Preserve order requires Python >= 2.7.') sys.exit(1) if len(args) < 1: err('Missing required parameter "database"') parser.print_help() sys.exit(1) connect = get_connect_kwargs(options) database = args[-1] tables = None if options.tables: tables = [table.strip() for table in options.tables.split(',') if table.strip()] introspector = make_introspector(options.engine, database, **connect) if options.info: cmd_line = ' '.join(raw_argv[1:]) print_header(cmd_line, introspector) print_models(introspector, tables, preserve_order=options.preserve_order)
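

# --- Editor's usage sketch (not part of the original script) ----------------
# Generating models programmatically instead of through the CLI entry point.
# The function below is illustrative and never called here; the connection
# parameters are placeholders, forwarded by make_introspector to the peewee
# database class.
def example_generate_models():
    introspector = make_introspector(
        'mysql', 'homework',
        host='127.0.0.1', port=3306, user='readonly', password='<password>')
    print_models(introspector, tables=['UCT_USER'])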
PypiClean
/DI_engine-0.4.9-py3-none-any.whl/dizoo/rocket/envs/rocket_env.py
from typing import Any, List, Union, Optional import time import os import imageio import gym import copy import numpy as np from easydict import EasyDict from rocket_recycling.rocket import Rocket from ding.envs import BaseEnv, BaseEnvTimestep from ding.torch_utils import to_ndarray, to_list from ding.utils import ENV_REGISTRY from ding.envs import ObsPlusPrevActRewWrapper @ENV_REGISTRY.register('rocket', force_overwrite=True) class RocketEnv(BaseEnv): def __init__(self, cfg: dict = {}) -> None: self._cfg = cfg self._init_flag = False self._save_replay = False self._observation_space = gym.spaces.Box(low=float("-inf"), high=float("inf"), shape=(8, ), dtype=np.float32) self._action_space = gym.spaces.Discrete(9) self._action_space.seed(0) # default seed self._reward_space = gym.spaces.Box(low=float("-inf"), high=float("inf"), shape=(1, ), dtype=np.float32) def reset(self) -> np.ndarray: if not self._init_flag: self._env = Rocket(task=self._cfg.task, max_steps=self._cfg.max_steps) self._init_flag = True if hasattr(self, '_seed') and hasattr(self, '_dynamic_seed') and self._dynamic_seed: np_seed = 100 * np.random.randint(1, 1000) self._env.seed(self._seed + np_seed) self._action_space.seed(self._seed + np_seed) elif hasattr(self, '_seed'): self._env.seed(self._seed) self._action_space.seed(self._seed) self._eval_episode_return = 0 obs = self._env.reset() obs = to_ndarray(obs) if self._save_replay: self._frames = [] return obs def close(self) -> None: if self._init_flag: self._env.close() self._init_flag = False def seed(self, seed: int, dynamic_seed: bool = True) -> None: self._seed = seed self._dynamic_seed = dynamic_seed np.random.seed(self._seed) def step(self, action: Union[int, np.ndarray]) -> BaseEnvTimestep: if isinstance(action, np.ndarray) and action.shape == (1, ): action = action.squeeze() # 0-dim array obs, rew, done, info = self._env.step(action) self._env.render() self._eval_episode_return += rew if self._save_replay: self._frames.extend(self._env.render()) if done: info['eval_episode_return'] = self._eval_episode_return if self._save_replay: path = os.path.join(self._replay_path, '{}_episode.gif'.format(self._save_replay_count)) self.display_frames_as_gif(self._frames, path) self._save_replay_count += 1 obs = to_ndarray(obs) # wrapped to be transfered to a array with shape (1,) rew = to_ndarray([rew]).astype(np.float32) return BaseEnvTimestep(obs, rew, done, info) def enable_save_replay(self, replay_path: Optional[str] = None) -> None: if replay_path is None: replay_path = './video' self._save_replay = True if not os.path.exists(replay_path): os.makedirs(replay_path) self._replay_path = replay_path self._save_replay_count = 0 def random_action(self) -> np.ndarray: random_action = self.action_space.sample() random_action = to_ndarray([random_action], dtype=np.int64) return random_action def clone(self, caller: str) -> 'RocketEnv': return RocketEnv(copy.deepcopy(self._cfg)) @property def observation_space(self) -> gym.spaces.Space: return self._observation_space @property def action_space(self) -> gym.spaces.Space: return self._action_space @property def reward_space(self) -> gym.spaces.Space: return self._reward_space def __repr__(self) -> str: return "DI-engine Rocket Env" @staticmethod def display_frames_as_gif(frames: list, path: str) -> None: imageio.mimsave(path, frames, fps=20)
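For orientation, a small smoke-test sketch of the wrapper above. The cfg keys mirror what reset() reads; the task name and step limit are assumed example values for the underlying rocket_recycling environment, not taken from this file:

# Hypothetical usage of RocketEnv; 'landing' and 800 are assumptions about
# the rocket_recycling configuration, not values defined in this module.
from easydict import EasyDict

cfg = EasyDict(dict(task='landing', max_steps=800))
env = RocketEnv(cfg)
env.seed(0)
obs = env.reset()                          # ndarray of shape (8,) per observation_space
timestep = env.step(env.random_action())   # BaseEnvTimestep(obs, reward, done, info)
print(timestep.reward, timestep.done)
env.close()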
PypiClean
/BuildStream-2.0.1-cp39-cp39-manylinux_2_28_x86_64.whl/buildstream/sandbox/_config.py
from typing import TYPE_CHECKING, Dict, Optional, Union from .._platform import Platform if TYPE_CHECKING: from ..node import Node, MappingNode # SandboxConfig # # The Sandbox configuration parameters, this object carries configuration # required to instantiate the correct type of sandbox, and assert that # the local or remote worker sandbox has the capabilities required. # # Args: # build_os: The build OS name # build_arch: A canonical machine architecture name, as defined by Platform.canonicalize_arch() # build_uid: The UID for the sandbox process # build_gid: The GID for the sandbox process # # If the build_uid or build_gid is unspecified, then the underlying sandbox implementation # does not guarantee what UID/GID will be used, but generally UID/GID 0 will be used in a # sandbox implementation which supports UID/GID control. # # If the build_uid or build_gid is specified, then the UID/GID is guaranteed to match # the specified UID/GID, if the underlying sandbox implementation does not support UID/GID # control, then an error will be raised when attempting to configure the sandbox. # class SandboxConfig: def __init__( self, *, build_os: str, build_arch: str, build_uid: Optional[int] = None, build_gid: Optional[int] = None ): self.build_os = build_os self.build_arch = build_arch self.build_uid = build_uid self.build_gid = build_gid # to_dict(): # # Represent the SandboxConfig as a dictionary. # # This dictionary will be stored in the corresponding artifact # whenever an artifact is cached. When loading an element from # an artifact, then this dict will be loaded as a MappingNode # and interpreted by SandboxConfig.new_from_node(). # # This function is also used to contribute to the owning element's cache key. # # Returns: # A dictionary representation of this SandboxConfig # def to_dict(self) -> Dict[str, Union[str, int]]: # Assign mandatory portions of the sandbox configuration # # /!\ No additional mandatory members can ever be added to # the sandbox configuration, as that would result in # breaking cache key stability. # sandbox_dict: Dict[str, Union[str, int]] = {"build-os": self.build_os, "build-arch": self.build_arch} # Assign optional portions of the sandbox configuration # # /!\ In order to preserve cache key stability, these attributes # are only ever added to the dictionary if they have been # explicitly set, unset values must not affect the dictionary. # if self.build_uid is not None: sandbox_dict["build-uid"] = self.build_uid if self.build_gid is not None: sandbox_dict["build-gid"] = self.build_gid return sandbox_dict # new_from_node(): # # Instantiate a new SandboxConfig from YAML configuration. # # If the Platform is specified, then we expect to be loading # from project definitions, and some defaults will be derived # from the Platform. Otherwise, we expect to be loading from # a cached artifact, and values are expected to exist on the # given node. 
# # Args: # config: The YAML configuration node # platform: The host Platform instance, or None # # Returns: # A new SandboxConfig instance # @classmethod def new_from_node(cls, config: "MappingNode[Node]", *, platform: Optional[Platform] = None) -> "SandboxConfig": config.validate_keys(["build-uid", "build-gid", "build-os", "build-arch"]) build_os: str build_arch: str if platform: tmp = config.get_str("build-os", None) if tmp: build_os = tmp.lower() else: build_os = platform.get_host_os() tmp = config.get_str("build-arch", None) if tmp: build_arch = Platform.canonicalize_arch(tmp) else: build_arch = platform.get_host_arch() else: build_os = config.get_str("build-os") build_arch = config.get_str("build-arch") build_uid = config.get_int("build-uid", None) build_gid = config.get_int("build-gid", None) return cls(build_os=build_os, build_arch=build_arch, build_uid=build_uid, build_gid=build_gid)
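As a quick illustration of the cache-key behaviour documented in to_dict() above, a short sketch; the OS and architecture strings are example values only:

# Optional uid/gid only enter the dictionary when explicitly set, so adding
# them later cannot silently change cache keys of configs that never used them.
config = SandboxConfig(build_os='linux', build_arch='x86-64')
assert config.to_dict() == {'build-os': 'linux', 'build-arch': 'x86-64'}

config_root = SandboxConfig(build_os='linux', build_arch='x86-64', build_uid=0, build_gid=0)
assert config_root.to_dict()['build-uid'] == 0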
PypiClean
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Protocol/WFS/v1_1_0.js
/**
 * @requires OpenLayers/Protocol/WFS/v1.js
 * @requires OpenLayers/Format/WFST/v1_1_0.js
 */

/**
 * Class: OpenLayers.Protocol.WFS.v1_1_0
 * A WFS v1.1.0 protocol for vector layers. Create a new instance with the
 * <OpenLayers.Protocol.WFS.v1_1_0> constructor.
 *
 * Differences from the v1.0.0 protocol:
 * - uses Filter Encoding 1.1.0 instead of 1.0.0
 * - uses GML 3 instead of 2 if no format is provided
 *
 * Inherits from:
 * - <OpenLayers.Protocol.WFS.v1>
 */
OpenLayers.Protocol.WFS.v1_1_0 = OpenLayers.Class(OpenLayers.Protocol.WFS.v1, {

    /**
     * Property: version
     * {String} WFS version number.
     */
    version: "1.1.0",

    /**
     * Constructor: OpenLayers.Protocol.WFS.v1_1_0
     * A class for giving layers WFS v1.1.0 protocol.
     *
     * Parameters:
     * options - {Object} Optional object whose properties will be set on the
     *     instance.
     *
     * Valid options properties:
     * featureType - {String} Local (without prefix) feature typeName (required).
     * featureNS - {String} Feature namespace (optional).
     * featurePrefix - {String} Feature namespace alias (optional - only used
     *     if featureNS is provided). Default is 'feature'.
     * geometryName - {String} Name of geometry attribute. Default is 'the_geom'.
     * outputFormat - {String} Optional output format to use for WFS GetFeature
     *     requests. This can be any format advertized by the WFS's
     *     GetCapabilities response. If set, an appropriate readFormat also
     *     has to be provided, unless outputFormat is GML3, GML2 or JSON.
     * readFormat - {<OpenLayers.Format>} An appropriate format parser if
     *     outputFormat is none of GML3, GML2 or JSON.
     */
    initialize: function(options) {
        OpenLayers.Protocol.WFS.v1.prototype.initialize.apply(this, arguments);
        if (this.outputFormat && !this.readFormat) {
            if (this.outputFormat.toLowerCase() == "gml2") {
                this.readFormat = new OpenLayers.Format.GML.v2({
                    featureType: this.featureType,
                    featureNS: this.featureNS,
                    geometryName: this.geometryName
                });
            } else if (this.outputFormat.toLowerCase() == "json") {
                this.readFormat = new OpenLayers.Format.GeoJSON();
            }
        }
    },

    CLASS_NAME: "OpenLayers.Protocol.WFS.v1_1_0"
});
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/grid/Selection.js
define("dojox/grid/Selection",["dojo/_base/declare","dojo/_base/array","dojo/_base/lang","dojo/dom-attr"],function(_1,_2,_3,_4){ return _1("dojox.grid.Selection",null,{constructor:function(_5){ this.grid=_5; this.selected=[]; this.setMode(_5.selectionMode); },mode:"extended",selected:null,updating:0,selectedIndex:-1,setMode:function(_6){ if(this.selected.length){ this.deselectAll(); } if(_6!="extended"&&_6!="multiple"&&_6!="single"&&_6!="none"){ this.mode="extended"; }else{ this.mode=_6; } },onCanSelect:function(_7){ return this.grid.onCanSelect(_7); },onCanDeselect:function(_8){ return this.grid.onCanDeselect(_8); },onSelected:function(_9){ },onDeselected:function(_a){ },onChanging:function(){ },onChanged:function(){ },isSelected:function(_b){ if(this.mode=="none"){ return false; } return this.selected[_b]; },getFirstSelected:function(){ if(!this.selected.length||this.mode=="none"){ return -1; } for(var i=0,l=this.selected.length;i<l;i++){ if(this.selected[i]){ return i; } } return -1; },getNextSelected:function(_c){ if(this.mode=="none"){ return -1; } for(var i=_c+1,l=this.selected.length;i<l;i++){ if(this.selected[i]){ return i; } } return -1; },getSelected:function(){ var _d=[]; for(var i=0,l=this.selected.length;i<l;i++){ if(this.selected[i]){ _d.push(i); } } return _d; },getSelectedCount:function(){ var c=0; for(var i=0;i<this.selected.length;i++){ if(this.selected[i]){ c++; } } return c; },_beginUpdate:function(){ if(this.updating===0){ this.onChanging(); } this.updating++; },_endUpdate:function(){ this.updating--; if(this.updating===0){ this.onChanged(); } },select:function(_e){ if(this.mode=="none"){ return; } if(this.mode!="multiple"){ this.deselectAll(_e); this.addToSelection(_e); }else{ this.toggleSelect(_e); } },addToSelection:function(_f){ if(this.mode=="none"){ return; } if(_3.isArray(_f)){ _2.forEach(_f,this.addToSelection,this); return; } _f=Number(_f); if(this.selected[_f]){ this.selectedIndex=_f; }else{ if(this.onCanSelect(_f)!==false){ this.selectedIndex=_f; var _10=this.grid.getRowNode(_f); if(_10){ _4.set(_10,"aria-selected","true"); } this._beginUpdate(); this.selected[_f]=true; this.onSelected(_f); this._endUpdate(); } } },deselect:function(_11){ if(this.mode=="none"){ return; } if(_3.isArray(_11)){ _2.forEach(_11,this.deselect,this); return; } _11=Number(_11); if(this.selectedIndex==_11){ this.selectedIndex=-1; } if(this.selected[_11]){ if(this.onCanDeselect(_11)===false){ return; } var _12=this.grid.getRowNode(_11); if(_12){ _4.set(_12,"aria-selected","false"); } this._beginUpdate(); delete this.selected[_11]; this.onDeselected(_11); this._endUpdate(); } },setSelected:function(_13,_14){ this[(_14?"addToSelection":"deselect")](_13); },toggleSelect:function(_15){ if(_3.isArray(_15)){ _2.forEach(_15,this.toggleSelect,this); return; } this.setSelected(_15,!this.selected[_15]); },_range:function(_16,_17,_18){ var s=(_16>=0?_16:_17),e=_17; if(s>e){ e=s; s=_17; } for(var i=s;i<=e;i++){ _18(i); } },selectRange:function(_19,_1a){ this._range(_19,_1a,_3.hitch(this,"addToSelection")); },deselectRange:function(_1b,_1c){ this._range(_1b,_1c,_3.hitch(this,"deselect")); },insert:function(_1d){ this.selected.splice(_1d,0,false); if(this.selectedIndex>=_1d){ this.selectedIndex++; } },remove:function(_1e){ this.selected.splice(_1e,1); if(this.selectedIndex>=_1e){ this.selectedIndex--; } },deselectAll:function(_1f){ for(var i in this.selected){ if((i!=_1f)&&(this.selected[i]===true)){ this.deselect(i); } } },clickSelect:function(_20,_21,_22){ if(this.mode=="none"){ return; } 
this._beginUpdate(); if(this.mode!="extended"){ this.select(_20); }else{ var _23=this.selectedIndex; if(!_21){ this.deselectAll(_20); } if(_22){ this.selectRange(_23,_20); }else{ if(_21){ this.toggleSelect(_20); }else{ this.addToSelection(_20); } } } this._endUpdate(); },clickSelectEvent:function(e){ this.clickSelect(e.rowIndex,dojo.isCopyKey(e),e.shiftKey); },clear:function(){ this._beginUpdate(); this.deselectAll(); this._endUpdate(); }}); });
PypiClean
/Django-Pizza-16.10.1.tar.gz/Django-Pizza-16.10.1/pizza/blog/migrations/0004_auto__add_field_mediafile_duration.py
import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'MediaFile.duration' db.add_column(u'blog_mediafile', 'duration', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'MediaFile.duration' db.delete_column(u'blog_mediafile', 'duration') models = { u'blog.blog': { 'Meta': {'ordering': "('title',)", 'object_name': 'Blog'}, 'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['kitchen_sink.Author']", 'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {}), 'formats': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['blog.FileFormat']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.Image']", 'null': 'True', 'blank': 'True'}), 'podcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'blog.category': { 'Meta': {'ordering': "('slug',)", 'object_name': 'Category'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'blog.fileformat': { 'Meta': {'ordering': "('title',)", 'object_name': 'FileFormat'}, 'ext': ('django.db.models.fields.SlugField', [], {'max_length': '10'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'blog.mediafile': { 'Meta': {'object_name': 'MediaFile'}, 'duration': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}), 'ext': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.FileFormat']"}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'post': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.Post']"}) }, u'blog.post': { 'Meta': {'ordering': "('-publish',)", 'object_name': 'Post'}, 'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['kitchen_sink.Author']", 'null': 'True', 'blank': 'True'}), 'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['blog.Blog']"}), 'body': ('django.db.models.fields.TextField', [], {}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['blog.Category']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.Image']", 'null': 'True', 'blank': 'True'}), 'imageset': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.ImageSet']", 'null': 
'True', 'blank': 'True'}), 'publish': ('django.db.models.fields.DateTimeField', [], {}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'kitchen_sink.author': { 'Meta': {'ordering': "('name',)", 'object_name': 'Author'}, 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['kitchen_sink.Image']", 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}) }, u'kitchen_sink.image': { 'Meta': {'ordering': "('title',)", 'object_name': 'Image'}, 'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'caption_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'credit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'credit_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'kitchen_sink.imageset': { 'Meta': {'ordering': "('title',)", 'object_name': 'ImageSet'}, 'captype': ('django.db.models.fields.CharField', [], {'default': "'override'", 'max_length': '10'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'sites.site': { 'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"}, 'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) } } complete_apps = ['blog']
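For reference, the column added by this migration corresponds to a model field declared roughly as follows; this is a sketch of the matching model-side code, which is not part of the migration file itself:

# Hypothetical model-side counterpart of the added column.
from django.db import models

class MediaFile(models.Model):
    # ... existing fields (file, ext, post, ...) ...
    duration = models.CharField(max_length=25, null=True, blank=True)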
PypiClean
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/make-dir/node_modules/semver/README.md
semver(1) -- The semantic versioner for npm =========================================== ## Install ```bash npm install semver ```` ## Usage As a node module: ```js const semver = require('semver') semver.valid('1.2.3') // '1.2.3' semver.valid('a.b.c') // null semver.clean(' =v1.2.3 ') // '1.2.3' semver.satisfies('1.2.3', '1.x || >=2.5.0 || 5.0.0 - 7.2.3') // true semver.gt('1.2.3', '9.8.7') // false semver.lt('1.2.3', '9.8.7') // true semver.minVersion('>=1.0.0') // '1.0.0' semver.valid(semver.coerce('v2')) // '2.0.0' semver.valid(semver.coerce('42.6.7.9.3-alpha')) // '42.6.7' ``` As a command-line utility: ``` $ semver -h A JavaScript implementation of the https://semver.org/ specification Copyright Isaac Z. Schlueter Usage: semver [options] <version> [<version> [...]] Prints valid versions sorted by SemVer precedence Options: -r --range <range> Print versions that match the specified range. -i --increment [<level>] Increment a version by the specified level. Level can be one of: major, minor, patch, premajor, preminor, prepatch, or prerelease. Default level is 'patch'. Only one version may be specified. --preid <identifier> Identifier to be used to prefix premajor, preminor, prepatch or prerelease version increments. -l --loose Interpret versions and ranges loosely -p --include-prerelease Always include prerelease versions in range matching -c --coerce Coerce a string into SemVer if possible (does not imply --loose) --rtl Coerce version strings right to left --ltr Coerce version strings left to right (default) Program exits successfully if any valid version satisfies all supplied ranges, and prints all satisfying versions. If no satisfying versions are found, then exits failure. Versions are printed in ascending order, so supplying multiple versions to the utility will just sort them. ``` ## Versions A "version" is described by the `v2.0.0` specification found at <https://semver.org/>. A leading `"="` or `"v"` character is stripped off and ignored. ## Ranges A `version range` is a set of `comparators` which specify versions that satisfy the range. A `comparator` is composed of an `operator` and a `version`. The set of primitive `operators` is: * `<` Less than * `<=` Less than or equal to * `>` Greater than * `>=` Greater than or equal to * `=` Equal. If no operator is specified, then equality is assumed, so this operator is optional, but MAY be included. For example, the comparator `>=1.2.7` would match the versions `1.2.7`, `1.2.8`, `2.5.3`, and `1.3.9`, but not the versions `1.2.6` or `1.1.0`. Comparators can be joined by whitespace to form a `comparator set`, which is satisfied by the **intersection** of all of the comparators it includes. A range is composed of one or more comparator sets, joined by `||`. A version matches a range if and only if every comparator in at least one of the `||`-separated comparator sets is satisfied by the version. For example, the range `>=1.2.7 <1.3.0` would match the versions `1.2.7`, `1.2.8`, and `1.2.99`, but not the versions `1.2.6`, `1.3.0`, or `1.1.0`. The range `1.2.7 || >=1.2.9 <2.0.0` would match the versions `1.2.7`, `1.2.9`, and `1.4.6`, but not the versions `1.2.8` or `2.0.0`. ### Prerelease Tags If a version has a prerelease tag (for example, `1.2.3-alpha.3`) then it will only be allowed to satisfy comparator sets if at least one comparator with the same `[major, minor, patch]` tuple also has a prerelease tag. 
For example, the range `>1.2.3-alpha.3` would be allowed to match the version `1.2.3-alpha.7`, but it would *not* be satisfied by `3.4.5-alpha.9`, even though `3.4.5-alpha.9` is technically "greater than" `1.2.3-alpha.3` according to the SemVer sort rules. The version range only accepts prerelease tags on the `1.2.3` version. The version `3.4.5` *would* satisfy the range, because it does not have a prerelease flag, and `3.4.5` is greater than `1.2.3-alpha.7`. The purpose for this behavior is twofold. First, prerelease versions frequently are updated very quickly, and contain many breaking changes that are (by the author's design) not yet fit for public consumption. Therefore, by default, they are excluded from range matching semantics. Second, a user who has opted into using a prerelease version has clearly indicated the intent to use *that specific* set of alpha/beta/rc versions. By including a prerelease tag in the range, the user is indicating that they are aware of the risk. However, it is still not appropriate to assume that they have opted into taking a similar risk on the *next* set of prerelease versions. Note that this behavior can be suppressed (treating all prerelease versions as if they were normal versions, for the purpose of range matching) by setting the `includePrerelease` flag on the options object to any [functions](https://github.com/npm/node-semver#functions) that do range matching. #### Prerelease Identifiers The method `.inc` takes an additional `identifier` string argument that will append the value of the string as a prerelease identifier: ```javascript semver.inc('1.2.3', 'prerelease', 'beta') // '1.2.4-beta.0' ``` command-line example: ```bash $ semver 1.2.3 -i prerelease --preid beta 1.2.4-beta.0 ``` Which then can be used to increment further: ```bash $ semver 1.2.4-beta.0 -i prerelease 1.2.4-beta.1 ``` ### Advanced Range Syntax Advanced range syntax desugars to primitive comparators in deterministic ways. Advanced ranges may be combined in the same way as primitive comparators using white space or `||`. #### Hyphen Ranges `X.Y.Z - A.B.C` Specifies an inclusive set. * `1.2.3 - 2.3.4` := `>=1.2.3 <=2.3.4` If a partial version is provided as the first version in the inclusive range, then the missing pieces are replaced with zeroes. * `1.2 - 2.3.4` := `>=1.2.0 <=2.3.4` If a partial version is provided as the second version in the inclusive range, then all versions that start with the supplied parts of the tuple are accepted, but nothing that would be greater than the provided tuple parts. * `1.2.3 - 2.3` := `>=1.2.3 <2.4.0` * `1.2.3 - 2` := `>=1.2.3 <3.0.0` #### X-Ranges `1.2.x` `1.X` `1.2.*` `*` Any of `X`, `x`, or `*` may be used to "stand in" for one of the numeric values in the `[major, minor, patch]` tuple. * `*` := `>=0.0.0` (Any version satisfies) * `1.x` := `>=1.0.0 <2.0.0` (Matching major version) * `1.2.x` := `>=1.2.0 <1.3.0` (Matching major and minor versions) A partial version range is treated as an X-Range, so the special character is in fact optional. * `""` (empty string) := `*` := `>=0.0.0` * `1` := `1.x.x` := `>=1.0.0 <2.0.0` * `1.2` := `1.2.x` := `>=1.2.0 <1.3.0` #### Tilde Ranges `~1.2.3` `~1.2` `~1` Allows patch-level changes if a minor version is specified on the comparator. Allows minor-level changes if not. 
* `~1.2.3` := `>=1.2.3 <1.(2+1).0` := `>=1.2.3 <1.3.0` * `~1.2` := `>=1.2.0 <1.(2+1).0` := `>=1.2.0 <1.3.0` (Same as `1.2.x`) * `~1` := `>=1.0.0 <(1+1).0.0` := `>=1.0.0 <2.0.0` (Same as `1.x`) * `~0.2.3` := `>=0.2.3 <0.(2+1).0` := `>=0.2.3 <0.3.0` * `~0.2` := `>=0.2.0 <0.(2+1).0` := `>=0.2.0 <0.3.0` (Same as `0.2.x`) * `~0` := `>=0.0.0 <(0+1).0.0` := `>=0.0.0 <1.0.0` (Same as `0.x`) * `~1.2.3-beta.2` := `>=1.2.3-beta.2 <1.3.0` Note that prereleases in the `1.2.3` version will be allowed, if they are greater than or equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but `1.2.4-beta.2` would not, because it is a prerelease of a different `[major, minor, patch]` tuple. #### Caret Ranges `^1.2.3` `^0.2.5` `^0.0.4` Allows changes that do not modify the left-most non-zero element in the `[major, minor, patch]` tuple. In other words, this allows patch and minor updates for versions `1.0.0` and above, patch updates for versions `0.X >=0.1.0`, and *no* updates for versions `0.0.X`. Many authors treat a `0.x` version as if the `x` were the major "breaking-change" indicator. Caret ranges are ideal when an author may make breaking changes between `0.2.4` and `0.3.0` releases, which is a common practice. However, it presumes that there will *not* be breaking changes between `0.2.4` and `0.2.5`. It allows for changes that are presumed to be additive (but non-breaking), according to commonly observed practices. * `^1.2.3` := `>=1.2.3 <2.0.0` * `^0.2.3` := `>=0.2.3 <0.3.0` * `^0.0.3` := `>=0.0.3 <0.0.4` * `^1.2.3-beta.2` := `>=1.2.3-beta.2 <2.0.0` Note that prereleases in the `1.2.3` version will be allowed, if they are greater than or equal to `beta.2`. So, `1.2.3-beta.4` would be allowed, but `1.2.4-beta.2` would not, because it is a prerelease of a different `[major, minor, patch]` tuple. * `^0.0.3-beta` := `>=0.0.3-beta <0.0.4` Note that prereleases in the `0.0.3` version *only* will be allowed, if they are greater than or equal to `beta`. So, `0.0.3-pr.2` would be allowed. When parsing caret ranges, a missing `patch` value desugars to the number `0`, but will allow flexibility within that value, even if the major and minor versions are both `0`. * `^1.2.x` := `>=1.2.0 <2.0.0` * `^0.0.x` := `>=0.0.0 <0.1.0` * `^0.0` := `>=0.0.0 <0.1.0` A missing `minor` and `patch` values will desugar to zero, but also allow flexibility within those values, even if the major version is zero. * `^1.x` := `>=1.0.0 <2.0.0` * `^0.x` := `>=0.0.0 <1.0.0` ### Range Grammar Putting all this together, here is a Backus-Naur grammar for ranges, for the benefit of parser authors: ```bnf range-set ::= range ( logical-or range ) * logical-or ::= ( ' ' ) * '||' ( ' ' ) * range ::= hyphen | simple ( ' ' simple ) * | '' hyphen ::= partial ' - ' partial simple ::= primitive | partial | tilde | caret primitive ::= ( '<' | '>' | '>=' | '<=' | '=' ) partial partial ::= xr ( '.' xr ( '.' xr qualifier ? )? )? xr ::= 'x' | 'X' | '*' | nr nr ::= '0' | ['1'-'9'] ( ['0'-'9'] ) * tilde ::= '~' partial caret ::= '^' partial qualifier ::= ( '-' pre )? ( '+' build )? pre ::= parts build ::= parts parts ::= part ( '.' part ) * part ::= nr | [-0-9A-Za-z]+ ``` ## Functions All methods and classes take a final `options` object argument. All options in this object are `false` by default. The options supported are: - `loose` Be more forgiving about not-quite-valid semver strings. (Any resulting output will always be 100% strict compliant, of course.) 
For backwards compatibility reasons, if the `options` argument is a boolean value instead of an object, it is interpreted to be the `loose` param. - `includePrerelease` Set to suppress the [default behavior](https://github.com/npm/node-semver#prerelease-tags) of excluding prerelease tagged versions from ranges unless they are explicitly opted into. Strict-mode Comparators and Ranges will be strict about the SemVer strings that they parse. * `valid(v)`: Return the parsed version, or null if it's not valid. * `inc(v, release)`: Return the version incremented by the release type (`major`, `premajor`, `minor`, `preminor`, `patch`, `prepatch`, or `prerelease`), or null if it's not valid * `premajor` in one call will bump the version up to the next major version and down to a prerelease of that major version. `preminor`, and `prepatch` work the same way. * If called from a non-prerelease version, the `prerelease` will work the same as `prepatch`. It increments the patch version, then makes a prerelease. If the input version is already a prerelease it simply increments it. * `prerelease(v)`: Returns an array of prerelease components, or null if none exist. Example: `prerelease('1.2.3-alpha.1') -> ['alpha', 1]` * `major(v)`: Return the major version number. * `minor(v)`: Return the minor version number. * `patch(v)`: Return the patch version number. * `intersects(r1, r2, loose)`: Return true if the two supplied ranges or comparators intersect. * `parse(v)`: Attempt to parse a string as a semantic version, returning either a `SemVer` object or `null`. ### Comparison * `gt(v1, v2)`: `v1 > v2` * `gte(v1, v2)`: `v1 >= v2` * `lt(v1, v2)`: `v1 < v2` * `lte(v1, v2)`: `v1 <= v2` * `eq(v1, v2)`: `v1 == v2` This is true if they're logically equivalent, even if they're not the exact same string. You already know how to compare strings. * `neq(v1, v2)`: `v1 != v2` The opposite of `eq`. * `cmp(v1, comparator, v2)`: Pass in a comparison string, and it'll call the corresponding function above. `"==="` and `"!=="` do simple string comparison, but are included for completeness. Throws if an invalid comparison string is provided. * `compare(v1, v2)`: Return `0` if `v1 == v2`, or `1` if `v1` is greater, or `-1` if `v2` is greater. Sorts in ascending order if passed to `Array.sort()`. * `rcompare(v1, v2)`: The reverse of compare. Sorts an array of versions in descending order when passed to `Array.sort()`. * `compareBuild(v1, v2)`: The same as `compare` but considers `build` when two versions are equal. Sorts in ascending order if passed to `Array.sort()`. `v2` is greater. Sorts in ascending order if passed to `Array.sort()`. * `diff(v1, v2)`: Returns difference between two versions by the release type (`major`, `premajor`, `minor`, `preminor`, `patch`, `prepatch`, or `prerelease`), or null if the versions are the same. ### Comparators * `intersects(comparator)`: Return true if the comparators intersect ### Ranges * `validRange(range)`: Return the valid range or null if it's not valid * `satisfies(version, range)`: Return true if the version satisfies the range. * `maxSatisfying(versions, range)`: Return the highest version in the list that satisfies the range, or `null` if none of them do. * `minSatisfying(versions, range)`: Return the lowest version in the list that satisfies the range, or `null` if none of them do. * `minVersion(range)`: Return the lowest version that can possibly match the given range. * `gtr(version, range)`: Return `true` if version is greater than all the versions possible in the range. 
* `ltr(version, range)`: Return `true` if version is less than all the versions possible in the range. * `outside(version, range, hilo)`: Return true if the version is outside the bounds of the range in either the high or low direction. The `hilo` argument must be either the string `'>'` or `'<'`. (This is the function called by `gtr` and `ltr`.) * `intersects(range)`: Return true if any of the ranges comparators intersect Note that, since ranges may be non-contiguous, a version might not be greater than a range, less than a range, *or* satisfy a range! For example, the range `1.2 <1.2.9 || >2.0.0` would have a hole from `1.2.9` until `2.0.0`, so the version `1.2.10` would not be greater than the range (because `2.0.1` satisfies, which is higher), nor less than the range (since `1.2.8` satisfies, which is lower), and it also does not satisfy the range. If you want to know if a version satisfies or does not satisfy a range, use the `satisfies(version, range)` function. ### Coercion * `coerce(version, options)`: Coerces a string to semver if possible This aims to provide a very forgiving translation of a non-semver string to semver. It looks for the first digit in a string, and consumes all remaining characters which satisfy at least a partial semver (e.g., `1`, `1.2`, `1.2.3`) up to the max permitted length (256 characters). Longer versions are simply truncated (`4.6.3.9.2-alpha2` becomes `4.6.3`). All surrounding text is simply ignored (`v3.4 replaces v3.3.1` becomes `3.4.0`). Only text which lacks digits will fail coercion (`version one` is not valid). The maximum length for any semver component considered for coercion is 16 characters; longer components will be ignored (`10000000000000000.4.7.4` becomes `4.7.4`). The maximum value for any semver component is `Integer.MAX_SAFE_INTEGER || (2**53 - 1)`; higher value components are invalid (`9999999999999999.4.7.4` is likely invalid). If the `options.rtl` flag is set, then `coerce` will return the right-most coercible tuple that does not share an ending index with a longer coercible tuple. For example, `1.2.3.4` will return `2.3.4` in rtl mode, not `4.0.0`. `1.2.3/4` will return `4.0.0`, because the `4` is not a part of any other overlapping SemVer tuple. ### Clean * `clean(version)`: Clean a string to be a valid semver if possible This will return a cleaned and trimmed semver version. If the provided version is not valid a null will be returned. This does not work for ranges. ex. * `s.clean(' = v 2.1.5foo')`: `null` * `s.clean(' = v 2.1.5foo', { loose: true })`: `'2.1.5-foo'` * `s.clean(' = v 2.1.5-foo')`: `null` * `s.clean(' = v 2.1.5-foo', { loose: true })`: `'2.1.5-foo'` * `s.clean('=v2.1.5')`: `'2.1.5'` * `s.clean(' =v2.1.5')`: `2.1.5` * `s.clean(' 2.1.5 ')`: `'2.1.5'` * `s.clean('~1.0.0')`: `null`
PypiClean
/Odte-0.3.4.tar.gz/Odte-0.3.4/README.md
![CI](https://github.com/Doctorado-ML/Odte/workflows/CI/badge.svg) [![CodeQL](https://github.com/Doctorado-ML/Odte/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/Doctorado-ML/Odte/actions/workflows/codeql-analysis.yml) [![codecov](https://codecov.io/gh/Doctorado-ML/odte/branch/master/graph/badge.svg)](https://codecov.io/gh/Doctorado-ML/odte) [![Codacy Badge](https://app.codacy.com/project/badge/Grade/f4b5ef87584b4095b6e49aefbe594c82)](https://www.codacy.com/gh/Doctorado-ML/Odte/dashboard?utm_source=github.com&utm_medium=referral&utm_content=Doctorado-ML/Odte&utm_campaign=Badge_Grade) [![PyPI version](https://badge.fury.io/py/Odte.svg)](https://badge.fury.io/py/Odte) ![https://img.shields.io/badge/python-3.8%2B-blue](https://img.shields.io/badge/python-3.8%2B-brightgreen) [![DOI](https://zenodo.org/badge/271595804.svg)](https://zenodo.org/badge/latestdoi/271595804) # Odte Oblique Decision Tree Ensemble
PypiClean
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/models/collectionentitycache.py
from __future__ import unicode_literals from __future__ import absolute_import, division, print_function """ This module is used to cache per-collection information about entities of some designated type. """ __author__ = "Graham Klyne ([email protected])" __copyright__ = "Copyright 2018, G. Klyne" __license__ = "MIT (http://opensource.org/licenses/MIT)" import logging log = logging.getLogger(__name__) from annalist import layout from annalist.exceptions import Annalist_Error from annalist.identifiers import ANNAL, RDFS from annalist.models.objectcache import get_cache, remove_cache # , remove_matching_caches # --------------------------------------------------------------------------- # # Local helper functions # # --------------------------------------------------------------------------- def make_cache_key(cache_type, entity_type_id, coll_id): return (cache_type, entity_type_id, coll_id) def match_cache_key_unused_(cache_types, entity_cls): def match_fn(cachekey): return (cachekey[0] in cache_types) and (cachekey[1] == entity_cls._entitytypeid) return match_fn # --------------------------------------------------------------------------- # # Error class # # --------------------------------------------------------------------------- class Cache_Error(Annalist_Error): """ Class for errors raised by closure calculations. """ def __init__(self, value=None, msg="Cache_error (collectionentityache)"): super(Cache_Error, self).__init__(value, msg) return # --------------------------------------------------------------------------- # # Entity-cache object class # # --------------------------------------------------------------------------- class CollectionEntityCacheObject(object): """ This class is an entity cache for a specified collection and entity type. NOTE: entities are instantiated with respect to a specified collection, but the collection objects are transient (regenerated for each request), so the cache stores the entity values but not the instantiated entities. Two kinds of information are cached: 1. entity cache: details of all entities that are visible in this class, indexed by entity id and entity URI 2. scope cache: lists of entity ids that are visible in different scopes: used when returning entity enumerations (see method "get_all_entities"). The scope cache is populated by calls to "get_all_entities". When a entity is added to or removed from the entity cache, lacking information about the scopes where it is visible, the scope cache is cleared. Scope values currently include "user", "all", "site"; None => "coll". Apart from treating None as collection local scope, the logic in this class treats scope names as opaque identifiers. The scope logic is embedded mainly in the Entity and EntityRoot class methods "_children". """ _cache_types = {"entities_by_id", "entity_ids_by_uri", "entity_ids_by_scope"} def __init__(self, coll_id, entity_cls): """ Initialize a cache object for a specified collection. coll_id Collection id with which the entity cache is associated. """ super(CollectionEntityCacheObject, self).__init__() self._coll_id = coll_id self._entity_cls = entity_cls self._type_id = entity_cls._entitytypeid self._entities_by_id = None self._entity_ids_by_uri = None self._entity_ids_by_scope = None self._site_cache = None return def _make_cache_key(self, cache_type): return make_cache_key(cache_type, self._type_id, self._coll_id) def _make_entity(self, coll, entity_id, entity_values): """ Internal helper method to construct an entity given its Id and values. 
coll is collection entity to which the new identity will belong entity_id is the new entity id entity_values is a dictionary containing: ["parent_id"] is the id of the parent entity ["data"] is a dictionary of values for the new entity Returns None if either Id or values evaluate as Boolean False (i.e. are None or empty), or if the parent collection is no longer accessible. """ entity = None if entity_id and entity_values: parent_id = entity_values["parent_id"] parent = coll if coll.get_id() != parent_id: for parent in coll.get_alt_entities(altscope="all"): if parent.get_id() == parent_id: break else: msg = ( "Saved parent id %s not found for entity %s/%s in collection %s"% (parent_id, self._type_id, entity_id, coll.get_id()) ) log.error(msg) return None # raise ValueError(msg) entity = self._entity_cls._child_init(parent, entity_id) entity.set_values(entity_values["data"]) return entity def _load_entity(self, coll, entity, entity_uri=None): """ Internal helper method saves entity data to cache. This function does not actually read entity data. Returns True if new entity data is added, otherwise False. """ entity_id = entity.get_id() if not entity_uri: entity_uri = entity.get_uri() entity_parent = entity.get_parent().get_id() entity_data = entity.get_save_values() add_entity = False with self._entities_by_id.access(entity_id) as es: if entity_id not in es: # Update cache via context handler es[entity_id] = {"parent_id": entity_parent, "data": entity_data} # Update other caches while _entities_by_id lock is acquired self._entity_ids_by_uri.set(entity_uri, entity_id) self._entity_ids_by_scope.flush() add_entity = True return add_entity def _load_entities(self, coll): """ Initialize cache of entities, if not already done. NOTE: site level entitites are cached separately by the collection cache manager, and merged separately. Hence "nosite" scope here. From entity.py: "nosite" - collection-level only: used for listing entities from just collections. Used when cacheing data, where site data is assumed to be invariant, hence no need to re-load. """ scope_name = "nosite" if self._site_cache else "all" if self._entities_by_id is None: self._entities_by_id = get_cache(self._make_cache_key("entities_by_id")) self._entity_ids_by_uri = get_cache(self._make_cache_key("entity_ids_by_uri")) self._entity_ids_by_scope = get_cache(self._make_cache_key("entity_ids_by_scope")) for entity_id in coll._children(self._entity_cls, altscope=scope_name): t = self._entity_cls.load(coll, entity_id, altscope=scope_name) self._load_entity(coll, t) return def _drop_entity(self, coll, entity_id): """ Drop entity from collection cache. Returns the entity removed, or None if not found. """ entity_values = self._entities_by_id.get(entity_id, None) entity = self._make_entity(coll, entity_id, entity_values) if entity: entity_uri = entity.get_uri() self._entities_by_id.pop(entity_id, None) self._entity_ids_by_uri.pop(entity_uri, None) self._entity_ids_by_scope.flush() return entity def set_site_cache(self, site_cache): self._site_cache = site_cache return def get_coll_id(self): return self._coll_id def set_entity(self, coll, entity): """ Save a new or updated entity definition. """ self._load_entities(coll) self._load_entity(coll, entity) return def remove_entity(self, coll, entity_id): """ Remove entity from collection cache. Returns the entity removed, or None if not found. """ self._load_entities(coll) # @@TODO: is this needed? 
return self._drop_entity(coll, entity_id) def get_entity(self, coll, entity_id): """ Retrieve the entity for a given entity id. Returns an entity for the supplied entity Id, or None if not defined for the current collection. """ self._load_entities(coll) entity_values = self._entities_by_id.get(entity_id, None) if entity_values: return self._make_entity(coll, entity_id, entity_values) # If not in collection cache, look for value in site cache: if self._site_cache: return self._site_cache.get_entity(coll.get_site_data(), entity_id) return None def get_entity_from_uri(self, coll, entity_uri): """ Retrieve an entity for a given entity URI. Returns an entity for the specified collecion and entuty URI, or None if the entity URI does not exist """ self._load_entities(coll) entity_id = self._entity_ids_by_uri.get(entity_uri, None) if entity_id: entity = self.get_entity(coll, entity_id) return entity # If not in collection cache, look for value in site cache: if self._site_cache: return self._site_cache.get_entity_from_uri( coll.get_site_data(), entity_uri ) return None def get_all_entity_ids(self, coll, altscope=None): """ Returns an iterator over all entity ids currently defined for a collection, which may be qualified by a specified scope. NOTE: this method returns only those entity ids for which a record has been saved to the collection data storage. """ self._load_entities(coll) scope_name = altscope or "coll" # 'None' designates collection-local scope scope_entity_ids = [] with self._entity_ids_by_scope.access(scope_name) as eids: if scope_name in eids: scope_entity_ids = eids[scope_name] else: # Collect entity ids for named scope for entity_id in coll._children(self._entity_cls, altscope=altscope): if entity_id != layout.INITIAL_VALUES_ID: scope_entity_ids.append(entity_id) # Update cache via context manager eids[scope_name] = scope_entity_ids return scope_entity_ids def get_all_entities(self, coll, altscope=None): """ Returns a generator of all entities currently defined for a collection, which may be qualified by a specified scope. NOTE: this method returns only those records that have actually been saved to the collection data storage. """ scope_entity_ids = self.get_all_entity_ids(coll, altscope=altscope) for entity_id in scope_entity_ids: t = self.get_entity(coll, entity_id) if t: yield t return def remove_cache(self): """ Close down and release all entity cache data """ if self._entities_by_id: remove_cache(self._entities_by_id.cache_key()) self._entities_by_id = None if self._entity_ids_by_uri: remove_cache(self._entity_ids_by_uri.cache_key()) self._entity_ids_by_uri = None if self._entity_ids_by_scope: remove_cache(self._entity_ids_by_scope.cache_key()) self._entity_ids_by_scope = None return # --------------------------------------------------------------------------- # # Collection entity-cache class # # --------------------------------------------------------------------------- coll_cache_by_type_id_coll_id = {} class CollectionEntityCache(object): """ This class manages multiple-collection cache objects """ def __init__(self, cache_cls, entity_cls): """ Initializes a value cache with no per-collection data. cache_cls is a class object for the collaction cache objects to be used. The constructor is called with collection id and entity class as parameters (see method `_get_cache`). entity_cls is a class object for the type of entity to be cached. 
""" super(CollectionEntityCache, self).__init__() self._cache_cls = cache_cls self._entity_cls = entity_cls self._type_id = entity_cls._entitytypeid coll_cache_by_type_id_coll_id[self._type_id] = {} return # Generic collection cache alllocation and access methods def _get_site_cache(self): """ Local helper returns a cache object for the site-wide entities """ if layout.SITEDATA_ID not in coll_cache_by_type_id_coll_id[self._type_id]: # log.info( # "CollectionEntityCache: creating %s cache for collection %s"% # (self._type_id, layout.SITEDATA_ID) # ) # Create and save new cache object site_cache = self._cache_cls(layout.SITEDATA_ID, self._entity_cls) coll_cache_by_type_id_coll_id[self._type_id][layout.SITEDATA_ID] = site_cache return coll_cache_by_type_id_coll_id[self._type_id][layout.SITEDATA_ID] def _get_cache(self, coll): """ Local helper returns a cache object for a specified collection. Creates a new cache object if needed. coll is a collection object for which a cache object is obtained """ coll_id = coll.get_id() # log.info( # "CollectionEntityCache: get %s cache for collection %s"% # (self._type_id, coll_id) # ) if coll_id not in coll_cache_by_type_id_coll_id[self._type_id]: # log.debug( # "CollectionEntityCache: creating %s cache for collection %s"% # (self._type_id, coll_id) # ) # Create and save new cache object coll_cache = self._cache_cls(coll_id, self._entity_cls) coll_cache.set_site_cache(self._get_site_cache()) coll_cache_by_type_id_coll_id[self._type_id][coll_id] = coll_cache return coll_cache_by_type_id_coll_id[self._type_id][coll_id] def flush_cache(self, coll): """ Remove all cached data for a specified collection. Returns True if the cache object was defined, otherwise False. coll is a collection object for which a cache is removed. """ coll_id = coll.get_id() cache = coll_cache_by_type_id_coll_id[self._type_id].pop(coll_id, None) if cache: cache.remove_cache() # log.info( # "CollectionEntityCache: flushed %s cache for collection %s"% # (self._type_id, coll_id) # ) return True return False def flush_all(self): """ Remove all cached data for all collections. """ # remove_cache_types = CollectionEntityCacheObject._cache_types # find_matching_caches( # match_cache_key(remove_cache_types, entity_cls), # lambda cache: cache.close() # ) caches = coll_cache_by_type_id_coll_id[self._type_id] coll_cache_by_type_id_coll_id[self._type_id] = {} # log.info( # "CollectionEntityCache: flushing %s cache for collections %r"% # (self._type_id, caches.keys()) # ) for coll_id in caches: caches[coll_id].remove_cache() return # Collection cache allocation and access methods def set_entity(self, coll, entity): """ Save a new or updated type definition """ entity_cache = self._get_cache(coll) return entity_cache.set_entity(coll, entity) def remove_entity(self, coll, entity_id): """ Remove entity from collection cache. Returns the entity removed if found, or None if not defined. """ entity_cache = self._get_cache(coll) return entity_cache.remove_entity(coll, entity_id) def get_entity(self, coll, entity_id): """ Retrieve an entity for a given entity id. Returns an entity object for the specified collection and entity id. """ entity_cache = self._get_cache(coll) return entity_cache.get_entity(coll, entity_id) def get_entity_from_uri(self, coll, entity_uri): """ Retrieve en entity for a given collection and entity URI. Returns an entity object for the specified collection and entity URI. 
""" entity_cache = self._get_cache(coll) return entity_cache.get_entity_from_uri(coll, entity_uri) def get_all_entity_ids(self, coll, altscope=None): """ Returns all entities currently available for a collection in the indicated scope. Default scope is entities defined directly in the indicated collection. """ entity_cache = self._get_cache(coll) return entity_cache.get_all_entity_ids(coll, altscope=altscope) def get_all_entities(self, coll, altscope=None): """ Returns all entities currently available for a collection in the indicated scope. Default scope is entities defined directly in the indicated collection. """ entity_cache = self._get_cache(coll) return entity_cache.get_all_entities(coll, altscope=altscope) # End.
PypiClean
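To make the two-level cache structure in the Annalist collectionentitycache module above concrete, a small sketch of how a per-type cache manager is typically created and queried. RecordType and coll stand in for a real Annalist entity class and collection object; they are assumptions, not defined in that module:

# Hypothetical wiring: one CollectionEntityCache per entity class, with
# per-collection CollectionEntityCacheObject instances created lazily by
# _get_cache() on first access.
type_cache = CollectionEntityCache(CollectionEntityCacheObject, RecordType)

entity = type_cache.get_entity(coll, "Default_type")       # falls back to the site cache
ids = type_cache.get_all_entity_ids(coll, altscope="all")  # id lists are cached per scope name
type_cache.flush_cache(coll)                               # drop cached data for one collection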
/Congo-0.0.1.tar.gz/Congo-0.0.1/portfolio/component/static/portfolio/vendor/jquery/src/css/support.js
define([ "../core", "../var/support" ], function( jQuery, support ) { (function() { // Minified: var b,c,d,e,f,g, h,i var div, style, a, pixelPositionVal, boxSizingReliableVal, reliableHiddenOffsetsVal, reliableMarginRightVal; // Setup div = document.createElement( "div" ); div.innerHTML = " <link/><table></table><a href='/a'>a</a><input type='checkbox'/>"; a = div.getElementsByTagName( "a" )[ 0 ]; style = a && a.style; // Finish early in limited (non-browser) environments if ( !style ) { return; } style.cssText = "float:left;opacity:.5"; // Support: IE<9 // Make sure that element opacity exists (as opposed to filter) support.opacity = style.opacity === "0.5"; // Verify style float existence // (IE uses styleFloat instead of cssFloat) support.cssFloat = !!style.cssFloat; div.style.backgroundClip = "content-box"; div.cloneNode( true ).style.backgroundClip = ""; support.clearCloneStyle = div.style.backgroundClip === "content-box"; // Support: Firefox<29, Android 2.3 // Vendor-prefix box-sizing support.boxSizing = style.boxSizing === "" || style.MozBoxSizing === "" || style.WebkitBoxSizing === ""; jQuery.extend(support, { reliableHiddenOffsets: function() { if ( reliableHiddenOffsetsVal == null ) { computeStyleTests(); } return reliableHiddenOffsetsVal; }, boxSizingReliable: function() { if ( boxSizingReliableVal == null ) { computeStyleTests(); } return boxSizingReliableVal; }, pixelPosition: function() { if ( pixelPositionVal == null ) { computeStyleTests(); } return pixelPositionVal; }, // Support: Android 2.3 reliableMarginRight: function() { if ( reliableMarginRightVal == null ) { computeStyleTests(); } return reliableMarginRightVal; } }); function computeStyleTests() { // Minified: var b,c,d,j var div, body, container, contents; body = document.getElementsByTagName( "body" )[ 0 ]; if ( !body || !body.style ) { // Test fired too early or in an unsupported environment, exit. return; } // Setup div = document.createElement( "div" ); container = document.createElement( "div" ); container.style.cssText = "position:absolute;border:0;width:0;height:0;top:0;left:-9999px"; body.appendChild( container ).appendChild( div ); div.style.cssText = // Support: Firefox<29, Android 2.3 // Vendor-prefix box-sizing "-webkit-box-sizing:border-box;-moz-box-sizing:border-box;" + "box-sizing:border-box;display:block;margin-top:1%;top:1%;" + "border:1px;padding:1px;width:4px;position:absolute"; // Support: IE<9 // Assume reasonable values in the absence of getComputedStyle pixelPositionVal = boxSizingReliableVal = false; reliableMarginRightVal = true; // Check for getComputedStyle so that this code is not run in IE<9. 
if ( window.getComputedStyle ) { pixelPositionVal = ( window.getComputedStyle( div, null ) || {} ).top !== "1%"; boxSizingReliableVal = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px"; // Support: Android 2.3 // Div with explicit width and no margin-right incorrectly // gets computed margin-right based on width of container (#3333) // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right contents = div.appendChild( document.createElement( "div" ) ); // Reset CSS: box-sizing; display; margin; border; padding contents.style.cssText = div.style.cssText = // Support: Firefox<29, Android 2.3 // Vendor-prefix box-sizing "-webkit-box-sizing:content-box;-moz-box-sizing:content-box;" + "box-sizing:content-box;display:block;margin:0;border:0;padding:0"; contents.style.marginRight = contents.style.width = "0"; div.style.width = "1px"; reliableMarginRightVal = !parseFloat( ( window.getComputedStyle( contents, null ) || {} ).marginRight ); div.removeChild( contents ); } // Support: IE8 // Check if table cells still have offsetWidth/Height when they are set // to display:none and there are still other visible table cells in a // table row; if so, offsetWidth/Height are not reliable for use when // determining if an element has been hidden directly using // display:none (it is still safe to use offsets if a parent element is // hidden; don safety goggles and see bug #4512 for more information). div.innerHTML = "<table><tr><td></td><td>t</td></tr></table>"; contents = div.getElementsByTagName( "td" ); contents[ 0 ].style.cssText = "margin:0;border:0;padding:0;display:none"; reliableHiddenOffsetsVal = contents[ 0 ].offsetHeight === 0; if ( reliableHiddenOffsetsVal ) { contents[ 0 ].style.display = ""; contents[ 1 ].style.display = "none"; reliableHiddenOffsetsVal = contents[ 0 ].offsetHeight === 0; } body.removeChild( container ); } })(); return support; });
PypiClean
/JustDeepIt-0.1.30.tar.gz/JustDeepIt-0.1.30/justdeepit/webapp/appbase.py
import os import glob import datetime import pathlib import json import logging import traceback logger = logging.getLogger(__name__) class AppCode: def __init__(self): self.CONFIG = 'CONFIG' self.TRAINING = 'TRAINING' self.INFERENCE = 'INFERENCE' self.STARTED = 'STARTED' self.RUNNING = 'RUNNING' self.FINISHED = 'FINISHED' self.ERROR = 'ERROR' self.INTERRUPT = 'INTERRUPT' self.COMPLETED = 'COMPLETED' self.JOB__INIT_WORKSPACE = 'INIT_WORKSPACE' self.JOB__SAVE_INIT_MODEL = 'SAVE_INIT_MODEL' self.JOB__SORT_IMAGES = 'SORT_IMAGES' self.JOB__TRAIN_MODEL = 'TRIAN_MODEL' self.JOB__INFER = 'INFER' self.JOB__SUMMARIZE = 'SUMMARIZE_RESULTS' class AppBase: def __init__(self, workspace): self.app = '(base)' self.code = AppCode() self.workspace = workspace self.workspace_ = os.path.join(workspace, 'justdeepitws') self.image_ext = ('.jpg', '.jpeg', '.png', '.tiff', '.tif') self.job_status_fpath = os.path.join(self.workspace_, 'config', 'job_status.txt') self.init_workspace() self.images = [] def init_workspace(self): try: workspace_subdirs = ['', 'tmp', 'config', 'data', 'data/train', 'data/query', 'log', 'outputs'] for workspace_subdir in workspace_subdirs: workspace_subdir_abspath = os.path.join(self.workspace_, workspace_subdir) if not os.path.exists(workspace_subdir_abspath): os.mkdir(workspace_subdir_abspath) job_status = self.set_jobstatus(self.code.CONFIG, self.code.JOB__INIT_WORKSPACE, self.code.STARTED, '') job_status = self.set_jobstatus(self.code.CONFIG, self.code.JOB__INIT_WORKSPACE, self.code.FINISHED, '') except KeyboardInterrupt: job_status = self.set_jobstatus(self.code.CONFIG, self.code.JOB__INIT_WORKSPACE, self.code.INTERRUPT, '') except BaseException as e: traceback.print_exc() job_status = self.set_jobstatus(self.code.CONFIG, self.code.JOB__INIT_WORKSPACE, self.code.ERROR, str(e)) else: job_status = self.set_jobstatus(self.code.CONFIG, self.code.JOB__INIT_WORKSPACE, self.code.COMPLETED, '') return job_status def set_jobstatus(self, module_name, job_code, job_status_code, msg=''): with open(self.job_status_fpath, 'a') as outfh: outfh.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format( self.app, datetime.datetime.now().isoformat(), module_name, job_code, job_status_code, msg )) return {'status': job_status_code, 'msg': msg} def sort_train_images(self, *args, **kwargs): raise NotImplementedError() def train_model(self, *args, **kwargs): raise NotImplementedError() def sort_query_images(self, *args, **kwargs): raise NotImplementedError() def detect_objects(self, *args, **kwargs): raise NotImplementedError() def summarize_objects(self, *args, **kwargs): raise NotImplementedError() def check_training_images(self, image_dpath, annotation_path, annotation_format, class_label='NA'): images = [] if self.__norm_str(annotation_format) == 'coco': with open(annotation_path, 'r') as infh: image_records = json.load(infh) for f in image_records['images']: if os.path.exists(os.path.join(image_dpath, os.path.basename(f['file_name']))): images.append(f) elif self.__norm_str(annotation_format) == 'vott': with open(annotation_path, 'r') as infh: image_records = json.load(infh) for fid, f in image_records['assets'].items(): if os.path.exists(os.path.join(image_dpath, os.path.basename(f['asset']['name']))): images.append(f) elif (self.__norm_str(annotation_format) == 'mask') or ('voc' in self.__norm_str(annotation_format)): fdict = {} for f in glob.glob(os.path.join(image_dpath, '*')): fname = os.path.splitext(os.path.basename(f))[0] if os.path.splitext(f)[1].lower() in self.image_ext: if fname not in fdict: 
fdict[fname] = 0 fdict[fname] += 1 for f in glob.glob(os.path.join(annotation_path, '*')): fname = os.path.splitext(os.path.basename(f))[0] if fname not in fdict: fdict[fname] = 0 fdict[fname] += 1 for fname, fval in fdict.items(): if fval == 2: images.append(fname) else: raise NotImplementedError('JustDeepIt does not support {} format.'.format(annotation_format)) logger.info('There are {} images for model training.'.format(len(images))) with open(os.path.join(self.workspace_, 'data', 'train', 'train_images.txt'), 'w') as outfh: outfh.write('CLASS_LABEL\t{}\n'.format(class_label)) outfh.write('IMAGES_DPATH\t{}\n'.format(image_dpath)) outfh.write('ANNOTATION_FPATH\t{}\n'.format(annotation_path)) outfh.write('ANNOTATION_FORMAT\t{}\n'.format(annotation_format)) outfh.write('N_IMAGES\t{}\n'.format(len(images))) def check_query_images(self, image_dpath): images = [] for f in sorted(glob.glob(os.path.join(image_dpath, '**'), recursive=True)): if os.path.splitext(f)[1].lower() in self.image_ext: images.append(f) with open(os.path.join(self.workspace_, 'data', 'query', 'query_images.txt'), 'w') as outfh: for image in images: outfh.write('{}\n'.format(image)) logger.info('There are {} images for inference.'.format(len(images))) def seek_query_images(self): self.images = [] with open(os.path.join(self.workspace_, 'data', 'query', 'query_images.txt'), 'r') as infh: for _image in infh: _image_info = _image.replace('\n', '').split('\t') self.images.append(_image_info[0]) def __norm_str(self, x): return x.replace('-', '').replace(' ', '').lower()
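# --- A minimal, hedged usage sketch (illustration only, not part of JustDeepIt) ---
# AppBase is meant to be subclassed by a concrete application that implements the
# abstract hooks above. The subclass name and its no-op body below are assumptions
# made purely to show how the workspace/job-status plumbing is intended to be used.
if __name__ == '__main__':
    import tempfile

    class _DemoApp(AppBase):
        def train_model(self, *args, **kwargs):
            # a real application would launch training here; this stub only logs job status
            self.set_jobstatus(self.code.TRAINING, self.code.JOB__TRAIN_MODEL, self.code.STARTED)
            self.set_jobstatus(self.code.TRAINING, self.code.JOB__TRAIN_MODEL, self.code.COMPLETED)

    with tempfile.TemporaryDirectory() as workspace:
        app = _DemoApp(workspace)
        app.train_model()
        with open(app.job_status_fpath) as fh:
            print(fh.read())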
PypiClean
/Another_One_Messenger_Server-0.9.tar.gz/Another_One_Messenger_Server-0.9/src/server/main_window.py
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QApplication, QLabel, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QTimer

from server.stat_window import StatWindow
from server.config_window import ConfigWindow
from server.add_user import RegisterUser
from server.remove_user import DelUserDialog


class MainWindow(QMainWindow):
    '''Main server window class.'''

    def __init__(self, database, server, config):
        # Parent constructor
        super().__init__()

        # Server database
        self.database = database
        self.server_thread = server
        self.config = config

        # Exit action
        self.exitAction = QAction('Выход', self)
        self.exitAction.setShortcut('Ctrl+Q')
        self.exitAction.triggered.connect(qApp.quit)

        # Button to refresh the client list
        self.refresh_button = QAction('Обновить список', self)

        # Server settings button
        self.config_btn = QAction('Настройки сервера', self)

        # User registration button
        self.register_btn = QAction('Регистрация пользователя', self)

        # User removal button
        self.remove_btn = QAction('Удаление пользователя', self)

        # Button to show the client message history
        self.show_history_button = QAction('История клиентов', self)

        # Status bar
        self.statusBar()
        self.statusBar().showMessage('Server Working')

        # Toolbar
        self.toolbar = self.addToolBar('MainBar')
        self.toolbar.addAction(self.exitAction)
        self.toolbar.addAction(self.refresh_button)
        self.toolbar.addAction(self.show_history_button)
        self.toolbar.addAction(self.config_btn)
        self.toolbar.addAction(self.register_btn)
        self.toolbar.addAction(self.remove_btn)

        # Main window geometry settings
        # Dynamic resizing is not handled here, so the window size is fixed.
        self.setFixedSize(800, 600)
        self.setWindowTitle('Messaging Server alpha release')

        # Label indicating that the list of connected clients follows below
        self.label = QLabel('Список подключённых клиентов:', self)
        self.label.setFixedSize(240, 15)
        self.label.move(10, 25)

        # Table with the list of connected clients.
        self.active_clients_table = QTableView(self)
        self.active_clients_table.move(10, 45)
        self.active_clients_table.setFixedSize(780, 400)

        # Timer that refreshes the client list once per second
        self.timer = QTimer()
        self.timer.timeout.connect(self.create_users_model)
        self.timer.start(1000)

        # Connect the buttons to their handlers
        self.refresh_button.triggered.connect(self.create_users_model)
        self.show_history_button.triggered.connect(self.show_statistics)
        self.config_btn.triggered.connect(self.server_config)
        self.register_btn.triggered.connect(self.reg_user)
        self.remove_btn.triggered.connect(self.rem_user)

        # Finally, show the window.
        self.show()

    def create_users_model(self):
        '''Method that fills the table of active users.'''
        list_users = self.database.active_users_list()
        list = QStandardItemModel()
        list.setHorizontalHeaderLabels(
            ['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения'])
        for row in list_users:
            user, ip, port, time = row
            user = QStandardItem(user)
            user.setEditable(False)
            ip = QStandardItem(ip)
            ip.setEditable(False)
            port = QStandardItem(str(port))
            port.setEditable(False)
            # Drop the microseconds from the time string, since that much
            # precision is not needed.
            time = QStandardItem(str(time.replace(microsecond=0)))
            time.setEditable(False)
            list.appendRow([user, ip, port, time])
        self.active_clients_table.setModel(list)
        self.active_clients_table.resizeColumnsToContents()
        self.active_clients_table.resizeRowsToContents()

    def show_statistics(self):
        '''Method that creates the client statistics window.'''
        global stat_window
        stat_window = StatWindow(self.database)
        stat_window.show()

    def server_config(self):
        '''Method that creates the server settings window.'''
        global config_window
        # Create the window and load the current parameters into it
        config_window = ConfigWindow(self.config)

    def reg_user(self):
        '''Method that creates the user registration window.'''
        global reg_window
        reg_window = RegisterUser(self.database, self.server_thread)
        reg_window.show()

    def rem_user(self):
        '''Method that creates the user removal window.'''
        global rem_window
        rem_window = DelUserDialog(self.database, self.server_thread)
        rem_window.show()
PypiClean
/BicycleParameters-1.0.0.tar.gz/BicycleParameters-1.0.0/bicycleparameters/tables.py
from math import ceil class Table(): """A class for generating tables of the measurment and parameter data associated with a bicycle. """ def __init__(self, source, latex, bicycles): """Sets the basic attributes of the table. Parameters ---------- source : string One of the parameter types: `Measured` or `Benchmark` for now. latex : boolean If true, the variable names will be formatted with LaTeX. bicycles : tuple or list of Bicycles Bicycle objects of which their parameters should appear in the generated table. The order of the bicycles determines the order in the table. """ self.source = source self.bicycles = bicycles self.latex = latex # go ahead and calculate the base data, which sets allVariables and # tableData self.generate_variable_list() self.generate_table_data() def generate_variable_list(self): # generate a complete list of the variables allVariables = [] try: for bicycle in self.bicycles: allVariables += bicycle.parameters[self.source].keys() except TypeError: allVariables += self.bicycle.parameters[self.source].keys() # remove duplicates and sort self.allVariables = sorted(list(set(allVariables)), key=lambda x: x.lower()) def generate_table_data(self): """Generates a list of data for a table.""" table = [] for var in self.allVariables: # add a new line table.append([]) if self.latex: table[-1].append(to_latex(var)) else: table[-1].append(var) for bicycle in self.bicycles: try: val, sig = uround(bicycle.parameters[self.source][var]).split('+/-') except ValueError: val = str(bicycle.parameters[self.source][var]) sig = 'NA' except KeyError: val = 'NA' sig = 'NA' table[-1] += [val, sig] self.tableData = table def create_rst_table(self, fileName=None): """Returns a reStructuredText version of the table. Parameters ---------- fileName : string If a path to a file is given, the table will be written to that file. Returns ------- rstTable : string reStructuredText version of the table. """ table = self.tableData # add the math directive if using latex if self.latex: for i, row in enumerate(table): self.tableData[i][0] = ':math:`' + row[0] + '`' # add a sub header table.insert(0, ['Variable']) for i, bicycle in enumerate(self.bicycles): if self.latex: table[0] += [':math:`v`', ':math:`\sigma`'] else: table[0] += ['v', 'sigma'] # find the longest string in each column largest = [0] # the top left is empty for bicycle in self.bicycles: l = int(ceil(len(bicycle.bicycleName) / 2.0)) largest += [l, l] for row in table: colSize = [len(string) for string in row] for i, pair in enumerate(zip(colSize, largest)): if pair[0] > pair[1]: largest[i] = pair[0] # build the rst table rstTable = '+' + '-' * (largest[0] + 2) for i, bicycle in enumerate(self.bicycles): rstTable += '+' + '-' * (largest[2 * i + 1] + largest[2 * i + 2] + 5) rstTable += '+\n|' + ' ' * (largest[0] + 2) for i, bicycle in enumerate(self.bicycles): rstTable += '| ' + bicycle.bicycleName + ' ' * (largest[2 * i + 1] + largest[2 * i + 2] + 4 - len(bicycle.bicycleName)) rstTable += '|\n' for j, row in enumerate(table): if j == 0: dash = '=' else: dash = '-' line = '' for i in range(len(row)): line += '+' + dash * (largest[i] + 2) line += '+\n|' for i, item in enumerate(row): line += ' ' + item + ' ' * (largest[i] - len(item)) + ' |' line += '\n' rstTable += line for num in largest: rstTable += '+' + dash * (num + 2) rstTable += '+' if fileName is not None: f = open(fileName, 'w') f.write(rstTable) f.close() return rstTable def to_latex(var): """Returns a latex representation for a given variable string name. 
Parameters ---------- var : string One of the variable names used in the bicycleparameters package. Returns ------- latex : string A string formatting for pretty LaTeX math print. """ latexMap = {'f': 'f', 'w': 'w', 'gamma': '\gamma', 'g': 'g', 'lcs': 'l_{cs}', 'hbb': 'h_{bb}', 'lsp': 'l_{sp}', 'lst': 'l_{st}', 'lamst': '\lambda_{st}', 'whb': 'w_{hb}', 'LhbF': 'l_{hbF}', 'LhbR': 'l_{hbR}', 'd': 'd', 'l': 'l', 'c': 'c', 'lam': '\lambda', 'xcl': 'x_{cl}', 'zcl': 'z_{cl}', 'ds1': 'd_{s1}', 'ds3': 'd_{s3}'} try: latex = latexMap[var] except KeyError: if var.startswith('alpha'): latex = r'\alpha_{' + var[-2:] + '}' elif var.startswith('a') and len(var) == 3: latex = 'a_{' + var[-2:] + '}' elif var.startswith('T'): latex = 'T^' + var[1] + '_{' + var[-2:] + '}' elif len(var) == 2: latex = var[0] + '_' + var[1] elif var.startswith('I'): latex = var[0] + '_{' + var[1:] + '}' else: raise return latex def uround(value): '''Returns a string representation of a value with an uncertainity which has been rounded to significant digits based on the uncertainty value. Parameters ---------- value : ufloat A single ufloat. Returns ------- s : string A rounded string representation of `value`. 2.4563752289999+/-0.0003797273827 becomes 2.4564+/-0.0004 This probably doesn't work for weird cases like large uncertainties. ''' try: # grab the nominal value and the uncertainty nom = value.nominal_value except AttributeError: s = str(value) else: uncert = value.std_dev if abs(nom) < 1e-15: s = '0.0+/-0.0' else: # convert the uncertainty to a string s = '%.14f' % uncert # find the first non-zero character for j, number in enumerate(s): if number == '0' or number == '.': pass else: digit = j break newUncert = round(uncert, digit - 1) newUncertStr = ('%.' + str(digit - 1) + 'f') % newUncert newNom = round(nom, len(newUncertStr) - 2) newNomStr = ('%.' + str(digit - 1) + 'f') % newNom diff = len(newUncertStr) - len(newNomStr) if diff > 0: s = newNomStr + int(diff) * '0' + '+/-' + newUncertStr else: s = newNomStr + '+/-' + newUncertStr return s
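# --- A small, hedged usage sketch (illustration only, not part of BicycleParameters) ---
# It exercises only the module-level helpers above and assumes the `uncertainties`
# package (which provides the ufloat values that `uround` is written against) is installed.
if __name__ == '__main__':
    from uncertainties import ufloat

    value = ufloat(2.4563752289999, 0.0003797273827)
    print(uround(value))        # expected to give '2.4564+/-0.0004'
    print(to_latex('lamst'))    # '\lambda_{st}'
    print(to_latex('alphaXY'))  # '\alpha_{XY}'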
PypiClean
/BayesDB-0.2.0.tar.gz/BayesDB-0.2.0/bayesdb/server_remote.py
from __future__ import print_function # # Copyright (c) 2011 Edward Langley # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of the project's author nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # from twisted.web import server, resource # iweb # from twisted.web.resource import EncodingResourceWrapper # from twisted.internet import ssl # import traceback from twisted.internet import reactor from jsonrpc.server import ServerEvents, JSON_RPC from bayesdb.engine import Engine engine = Engine() from bayesdb.client import Client client = Client() class ExampleServer(ServerEvents): # inherited hooks def log(self, responses, txrequest, error): print(txrequest.code, end=' ') if isinstance(responses, list): for response in responses: msg = self._get_msg(response) print(txrequest, msg) else: msg = self._get_msg(responses) print(txrequest, msg) def findmethod(self, method, args=None, kwargs=None): return client.execute # helper methods methods = set([client.execute]) def _get_msg(self, response): ret_str = 'No id response: %s' % str(response) if hasattr(response, 'id'): ret_str = str(response.id) if response.result: ret_str += '; result: %s' % str(response.result) else: ret_str += '; error: %s' % str(response.error) for at in dir(response): if not at.startswith('__'): print(at + ": " + str(getattr(response, at))) print("response:\n" + str(dir(response))) return ret_str class CorsEncoderFactory(object): def encoderForRequest(self, request): request.setHeader("Access-Control-Allow-Origin", '*') request.setHeader("Access-Control-Allow-Methods", 'PUT, GET') return _CorsEncoder(request) class _CorsEncoder(object): """ @ivar _request: A reference to the originating request. @since: 12.3 """ def __init__(self, request): self._request = request def encode(self, data): return data def finish(self): return "" root = JSON_RPC().customize(ExampleServer) wrapped = resource.IResource(root, [CorsEncoderFactory()]) site = server.Site(wrapped) # 8008 is the port you want to run under. Choose something >1024 PORT = 8008 print('Listening on port %d...' 
% PORT) print('*Be sure to use ClientRemote from client_remote.py rather than the standard Client.') reactor.listenTCP(PORT, site) reactor.run()
PypiClean
/AWS_OOP_distributions-0.1.tar.gz/AWS_OOP_distributions-0.1/distributions/Gaussiandistribution.py
import math import matplotlib.pyplot as plt from .Generaldistribution import Distribution class Gaussian(Distribution): """ Gaussian distribution class for calculating and visualizing a Gaussian distribution. Attributes: mean (float) representing the mean value of the distribution stdev (float) representing the standard deviation of the distribution data_list (list of floats) a list of floats extracted from the data file """ def __init__(self, mu=0, sigma=1): Distribution.__init__(self, mu, sigma) def calculate_mean(self): """Function to calculate the mean of the data set. Args: None Returns: float: mean of the data set """ avg = 1.0 * sum(self.data) / len(self.data) self.mean = avg return self.mean def calculate_stdev(self, sample=True): """Function to calculate the standard deviation of the data set. Args: sample (bool): whether the data represents a sample or population Returns: float: standard deviation of the data set """ if sample: n = len(self.data) - 1 else: n = len(self.data) mean = self.calculate_mean() sigma = 0 for d in self.data: sigma += (d - mean) ** 2 sigma = math.sqrt(sigma / n) self.stdev = sigma return self.stdev def plot_histogram(self): """Function to output a histogram of the instance variable data using matplotlib pyplot library. Args: None Returns: None """ plt.hist(self.data) plt.title('Histogram of Data') plt.xlabel('data') plt.ylabel('count') def pdf(self, x): """Probability density function calculator for the gaussian distribution. Args: x (float): point for calculating the probability density function Returns: float: probability density function output """ return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2) def plot_histogram_pdf(self, n_spaces = 50): """Function to plot the normalized histogram of the data and a plot of the probability density function along the same range Args: n_spaces (int): number of data points Returns: list: x values for the pdf plot list: y values for the pdf plot """ mu = self.mean sigma = self.stdev min_range = min(self.data) max_range = max(self.data) # calculates the interval between x values interval = 1.0 * (max_range - min_range) / n_spaces x = [] y = [] # calculate the x values to visualize for i in range(n_spaces): tmp = min_range + interval*i x.append(tmp) y.append(self.pdf(tmp)) # make the plots fig, axes = plt.subplots(2,sharex=True) fig.subplots_adjust(hspace=.5) axes[0].hist(self.data, density=True) axes[0].set_title('Normed Histogram of Data') axes[0].set_ylabel('Density') axes[1].plot(x, y) axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation') axes[0].set_ylabel('Density') plt.show() return x, y def __add__(self, other): """Function to add together two Gaussian distributions Args: other (Gaussian): Gaussian instance Returns: Gaussian: Gaussian distribution """ result = Gaussian() result.mean = self.mean + other.mean result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2) return result def __repr__(self): """Function to output the characteristics of the Gaussian instance Args: None Returns: string: characteristics of the Gaussian """ return "mean {}, standard deviation {}".format(self.mean, self.stdev)
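# --- A brief, hedged usage sketch (illustration only, not part of the package) ---
# It assumes the Distribution base class stores its sample in a plain `data` list,
# as the methods above expect, and fills that list directly instead of reading a file.
if __name__ == '__main__':
    gaussian = Gaussian()
    gaussian.data = [1, 3, 99, 100, 120, 32, 330, 23, 76, 44, 31]
    print(gaussian.calculate_mean())    # sample mean of the list
    print(gaussian.calculate_stdev())   # sample standard deviation (n - 1 denominator)
    print(gaussian.pdf(50))             # density at x = 50 under the fitted Gaussian

    # Two Gaussians add by summing means and combining standard deviations in quadrature
    combined = Gaussian(25, 3) + Gaussian(30, 4)
    print(combined)                     # "mean 55, standard deviation 5.0"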
PypiClean
/Cnc25D-0.1.10.tar.gz/Cnc25D-0.1.10/cnc25d/positioning.py
################################################################ # header for Python / FreeCAD compatibility ################################################################ import importing_freecad importing_freecad.importing_freecad() #print("FreeCAD.Version:", FreeCAD.Version()) #FreeCAD.Console.PrintMessage("Hello from PrintMessage!\n") # avoid using this method because it is not printed in the FreeCAD GUI ################################################################ # import ################################################################ import Part from FreeCAD import Base import math import sys, argparse import design_help # just for get_effective_args() ################################################################ # Positioning API ################################################################ def place_plank(ai_plank_solid, ai_x_length, ai_y_width, ai_z_height, ai_flip, ai_orientation, ai_translate_x, ai_translate_y, ai_translate_z): """ After creating a plank, use this function to place it in a cuboid construction """ r_placed_plank = ai_plank_solid #r_placed_plank = ai_plank_solid.copy() # flip flip_center_x = ai_x_length/2 flip_center_y = ai_y_width/2 flip_center_z = ai_z_height/2 if(ai_flip=='i'): r_placed_plank.rotate(Base.Vector(flip_center_x, flip_center_y, flip_center_z),Base.Vector(0,0,1),0) elif(ai_flip=='x'): r_placed_plank.rotate(Base.Vector(flip_center_x, flip_center_y, flip_center_z),Base.Vector(1,0,0),180) elif(ai_flip=='y'): r_placed_plank.rotate(Base.Vector(flip_center_x, flip_center_y, flip_center_z),Base.Vector(0,1,0),180) elif(ai_flip=='z'): r_placed_plank.rotate(Base.Vector(flip_center_x, flip_center_y, flip_center_z),Base.Vector(0,0,1),180) else: print("ERR505: Error, the flip value %s doesn't exist! Use only: i,x,y,z."%ai_flip) sys.exit(2) # orientation if(ai_orientation=='xy'): r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,0,1),0) r_placed_plank.translate(Base.Vector(0, 0, 0)) elif(ai_orientation=='xz'): r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(1,0,0),90) r_placed_plank.translate(Base.Vector(0, ai_z_height, 0)) elif(ai_orientation=='yx'): r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,0,1),90) r_placed_plank.translate(Base.Vector(ai_y_width, 0, 0)) elif(ai_orientation=='yz'): r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,0,1),90) r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,1,0),90) r_placed_plank.translate(Base.Vector(0, 0, 0)) elif(ai_orientation=='zx'): r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,1,0),-90) r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,0,1),-90) r_placed_plank.translate(Base.Vector(0, 0, 0)) elif(ai_orientation=='zy'): r_placed_plank.rotate(Base.Vector(0, 0, 0),Base.Vector(0,1,0),-90) r_placed_plank.translate(Base.Vector(ai_z_height, 0, 0)) else: print("ERR506: Error, the orientation value %s doesn't exist! 
Use only: xz,xy,yx,yz,zx,zy."%ai_orientation) sys.exit(2) # translation r_placed_plank.translate(Base.Vector(ai_translate_x, ai_translate_y, ai_translate_z)) return(r_placed_plank) ################################################################ # API testing ################################################################ def test_plank(): """ Plank example to test the place_plank function """ r_plank = Part.makeBox(20,4,2) r_plank = r_plank.cut(Part.makeBox(5,3,4, Base.Vector(16,-1,-1), Base.Vector(0,0,1))) r_plank = r_plank.cut(Part.makeBox(3,6,2, Base.Vector(18,-1,1), Base.Vector(0,0,1))) #Part.show(r_plank) return(r_plank) def positioning_test1(): """ test the place_plank function """ # test place_plank() #pp0 = test_plank() #Part.show(pp0) pp1 = place_plank(test_plank(), 20,4,2, 'i', 'xy', 300,0,0) Part.show(pp1) pp2 = place_plank(test_plank(), 20,4,2, 'x', 'xy', 300,30,0) Part.show(pp2) pp3 = place_plank(test_plank(), 20,4,2, 'y', 'xy', 300,60,0) Part.show(pp3) pp4 = place_plank(test_plank(), 20,4,2, 'z', 'xy', 300,90,0) Part.show(pp4) #pp4 = place_plank(test_plank(), 20,4,2, 'u', 'xy', 300,30,0) pp21 = place_plank(test_plank(), 20,4,2, 'i', 'xy', 350,0,0) Part.show(pp21) pp22 = place_plank(test_plank(), 20,4,2, 'i', 'xz', 350,30,0) Part.show(pp22) pp23 = place_plank(test_plank(), 20,4,2, 'i', 'yx', 350,60,0) Part.show(pp23) pp24 = place_plank(test_plank(), 20,4,2, 'i', 'yz', 350,90,0) Part.show(pp24) pp25 = place_plank(test_plank(), 20,4,2, 'i', 'zx', 350,120,0) Part.show(pp25) pp26 = place_plank(test_plank(), 20,4,2, 'i', 'zy', 350,150,0) Part.show(pp26) #pp27 = place_plank(test_plank(), 20,4,2, 'i', 'xx', 350,180,0) ##Part.show(pp1) r_test = 1 return(r_test) ################################################################ # positioning command line interface ################################################################ def positioning_cli(ai_args=""): """ it is the command line interface of positioning.py when it is used in standalone """ posi_parser = argparse.ArgumentParser(description='Test the positioning API') posi_parser.add_argument('--test1','--t1', action='store_true', default=False, dest='sw_test1', help='First test to check place_plank.') effective_args = design_help.get_effective_args(ai_args) posi_args = posi_parser.parse_args(effective_args) print("dbg111: start testing positioning.py") if(posi_args.sw_test1): positioning_test1() print("dbg999: end of script") ################################################################ # main ################################################################ # with freecad, the script is also main :) if __name__ == "__main__": FreeCAD.Console.PrintMessage("dbg109: I'm main\n") #positioning_cli() positioning_cli("--test1")
PypiClean
/Augmentor-0.2.12.tar.gz/Augmentor-0.2.12/README.md
![AugmentorLogo](https://github.com/mdbloice/AugmentorFiles/blob/master/Misc/AugmentorLogo.png) Augmentor is an image augmentation library in Python for machine learning. It aims to be a standalone library that is platform and framework independent, which is more convenient, allows for finer grained control over augmentation, and implements the most real-world relevant augmentation techniques. It employs a stochastic approach using building blocks that allow for operations to be pieced together in a pipeline. [![PyPI](https://img.shields.io/badge/Augmentor-v0.2.10-blue.svg?maxAge=2592000)](https://pypi.python.org/pypi/Augmentor) [![Supported Python Versions](https://img.shields.io/badge/python-2.7%20%7C%203.5%20%7C%203.6%20%7C%203.7%20%7C%203.8%20%7C%203.9-blue.svg)](https://pypi.python.org/pypi/Augmentor) [![PyPI Install](https://github.com/mdbloice/Augmentor/actions/workflows/PyPI.yml/badge.svg)](https://github.com/mdbloice/Augmentor/actions/workflows/PyPI.yml) [![Pytest](https://github.com/mdbloice/Augmentor/actions/workflows/package-tests.yml/badge.svg)](https://github.com/mdbloice/Augmentor/actions/workflows/package-tests.yml) [![Documentation Status](https://readthedocs.org/projects/augmentor/badge/?version=master)](https://augmentor.readthedocs.io/en/master/?badge=master) [![License](http://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat)](LICENSE.md) [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](http://www.repostatus.org/badges/latest/active.svg)](http://www.repostatus.org/#active) [![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/4QuantOSS/Augmentor/master) ## Installation Augmentor is written in Python. A Julia version of the package is also being developed as a sister project and is available [here](https://github.com/Evizero/Augmentor.jl). Install using `pip` from the command line: ```python pip install Augmentor ``` See the documentation for building from source. To upgrade from a previous version, use `pip install Augmentor --upgrade`. ## Documentation Complete documentation can be found on Read the Docs: [https://augmentor.readthedocs.io](https://augmentor.readthedocs.io/en/stable/) ## Quick Start Guide and Usage The purpose of _Augmentor_ is to automate image augmentation (artificial data generation) in order to expand datasets as input for machine learning algorithms, especially neural networks and deep learning. The package works by building an augmentation **pipeline** where you define a series of operations to perform on a set of images. Operations, such as rotations or transforms, are added one by one to create an augmentation pipeline: when complete, the pipeline can be executed and an augmented dataset is created. To begin, instantiate a `Pipeline` object that points to a directory on your file system: ```python import Augmentor p = Augmentor.Pipeline("/path/to/images") ``` You can then add operations to the Pipeline object `p` as follows: ```python p.rotate(probability=0.7, max_left_rotation=10, max_right_rotation=10) p.zoom(probability=0.5, min_factor=1.1, max_factor=1.5) ``` Every function requires you to specify a probability, which is used to decide if an operation is applied to an image as it is passed through the augmentation pipeline. Once you have created a pipeline, you can sample from it like so: ```python p.sample(10000) ``` which will generate 10,000 augmented images based on your specifications. 
By default these will be written to the disk in a directory named `output` relative to the path specified when initialising the `p` pipeline object above. If you wish to process each image in the pipeline exactly once, use `process()`: ```python p.process() ``` This function might be useful for resizing a dataset for example. It would make sense to create a pipeline where all of its operations have their probability set to `1` when using the `process()` method. ### Multi-threading Augmentor (version >=0.2.1) now uses multi-threading to increase the speed of generating images. This *may* slow down some pipelines if the original images are very small. Set `multi_threaded` to ``False`` if slowdown is experienced: ```python p.sample(100, multi_threaded=False) ``` However, by default the `sample()` function uses multi-threading. This is currently only implemented when saving to disk. Generators will use multi-threading in the next version update. ### Ground Truth Data Images can be passed through the pipeline in groups of two or more so that ground truth data can be identically augmented. | Original image and mask<sup>[3]</sup> | Augmented original and mask images | |---------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------| | ![OriginalMask](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/original-with-mask.png) | ![AugmentedMask](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/ground-truth.gif) | To augment ground truth data in parallel to any original data, add a ground truth directory to a pipeline using the [ground_truth()](https://augmentor.readthedocs.io/en/master/code.html#Augmentor.Pipeline.Pipeline.ground_truth) function: ```python p = Augmentor.Pipeline("/path/to/images") # Point to a directory containing ground truth data. # Images with the same file names will be added as ground truth data # and augmented in parallel to the original data. p.ground_truth("/path/to/ground_truth_images") # Add operations to the pipeline as normal: p.rotate(probability=1, max_left_rotation=5, max_right_rotation=5) p.flip_left_right(probability=0.5) p.zoom_random(probability=0.5, percentage_area=0.8) p.flip_top_bottom(probability=0.5) p.sample(50) ``` ### Multiple Mask/Image Augmentation Using the `DataPipeline` class (Augmentor version >= 0.2.3), images that have multiple associated masks can be augmented: | Multiple Mask Augmentation | |----------------------------------------------------------------------------------------------------------| | ![MultipleMask](https://github.com/mdbloice/AugmentorFiles/blob/master/UsageGuide/merged-multi-mask.gif) | Arbitrarily long lists of images can be passed through the pipeline in groups and augmented identically using the `DataPipeline` class. This is useful for ground truth images that have several masks, for example. In the example below, the images and their masks are contained in the `images` data structure (as lists of lists), while their labels are contained in `y`: ```python p = Augmentor.DataPipeline(images, y) p.rotate(1, max_left_rotation=5, max_right_rotation=5) p.flip_top_bottom(0.5) p.zoom_random(1, percentage_area=0.5) augmented_images, labels = p.sample(100) ``` The `DataPipeline` returns images directly (`augmented_images` above), and does not save them to disk, nor does it read data from the disk. 
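As a rough sketch of what that structure can look like (the exact element type used here, NumPy arrays, is an assumption; the notebook linked below is the authoritative reference), each entry of `images` is a list whose first item is an original image and whose remaining items are its masks:

```python
import numpy as np

# hypothetical arrays standing in for one original image and its two masks
original = np.random.randint(0, 255, size=(256, 256, 3), dtype=np.uint8)
mask_1 = np.zeros((256, 256, 3), dtype=np.uint8)
mask_2 = np.zeros((256, 256, 3), dtype=np.uint8)

images = [[original, mask_1, mask_2]]  # one inner list per original image
y = [0]                                # one label per inner list
```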
Images are passed directly to `DataPipeline` during initialisation. For details of the `images` data structure and how to create it, see the [`Multiple-Mask-Augmentation.ipynb`](https://github.com/mdbloice/Augmentor/blob/master/notebooks/Multiple-Mask-Augmentation.ipynb) Jupyter notebook. ### Generators for Keras and PyTorch If you do not wish to save to disk, you can use a generator (in this case with Keras): ```python g = p.keras_generator(batch_size=128) images, labels = next(g) ``` which returns a batch of images of size 128 and their corresponding labels. Generators return data indefinitely, and can be used to train neural networks with augmented data on the fly. Alternatively, you can integrate it with PyTorch: ```python import torchvision transforms = torchvision.transforms.Compose([ p.torch_transform(), torchvision.transforms.ToTensor(), ]) ``` ## Main Features ### Elastic Distortions Using elastic distortions, one image can be used to generate many images that are real-world feasible and label preserving: | Input Image | | Augmented Images | |-----------------------------------------------------------------------------------------------------------------------------------|---|-------------------------------------------------------------------------------------------------------------------------| | ![eight_hand_drawn_border](https://cloud.githubusercontent.com/assets/16042756/23697279/79850d52-03e7-11e7-9445-475316b702a3.png) | → | ![eights_border](https://cloud.githubusercontent.com/assets/16042756/23697283/802698a6-03e7-11e7-94b7-f0b61977ef33.gif) | The input image has a 1 pixel black border to emphasise that you are getting distortions without changing the size or aspect ratio of the original image, and without any black/transparent padding around the newly generated images. The functionality can be more clearly seen here: | Original Image<sup>[1]</sup> | Random distortions applied | |---------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/orig.png) | ![Distorted](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/distort.gif) | ### Perspective Transforms There are a total of 12 different types of perspective transform available. Four of the most common are shown below. 
| Tilt Left | Tilt Right | Tilt Forward | Tilt Backward | |---------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------| | ![TiltLeft](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/TiltLeft_s.png) | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/TiltRight_s.png) | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/TiltForward_s.png) | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/TiltBackward_s.png) | The remaining eight types of transform are as follows: | Skew Type 0 | Skew Type 1 | Skew Type 2 | Skew Type 3 | |-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| | ![Skew0](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner0_s.png) | ![Skew1](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner1_s.png) | ![Skew2](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner2_s.png) | ![Skew3](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner3_s.png) | | Skew Type 4 | Skew Type 5 | Skew Type 6 | Skew Type 7 | |-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------| | ![Skew4](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner4_s.png) | ![Skew5](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner5_s.png) | ![Skew6](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner6_s.png) | ![Skew7](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/Corner7_s.png) | ### Size Preserving Rotations Rotations by default preserve the file size of the original images: | Original Image | Rotated 10 degrees, automatically cropped | |---------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/orig.png) | ![Rotate](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/rotate_aug_b.png) | Compared to rotations by other software: | Original Image | Rotated 10 degrees | 
|---------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/orig.png) | ![Rotate](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/rotate.png) | ### Size Preserving Shearing Shearing will also automatically crop the correct area from the sheared image, so that you have an image with no black space or padding. | Original image | Shear (x-axis) 20 degrees | Shear (y-axis) 20 degrees | |---------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/orig.png) | ![ShearX](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/shear_x_aug.png) | ![ShearY](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/shear_y_aug.png) | Compare this to how this is normally done: | Original image | Shear (x-axis) 20 degrees | Shear (y-axis) 20 degrees | |---------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/orig.png) | ![ShearX](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/shear_x.png) | ![ShearY](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/shear_y.png) | ### Cropping Cropping can also be handled in a manner more suitable for machine learning image augmentation: | Original image | Random crops + resize operation | |---------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/orig.png) | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/crop_resize.gif) | ### Random Erasing Random Erasing is a technique used to make models robust to occlusion. This may be useful for training neural networks used in object detection in navigation scenarios, for example. | Original image<sup>[2]</sup> | Random Erasing | |----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/city-road-street-italy-scaled.jpg) | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/city-road-street-italy-animation.gif) | See the [Pipeline.random_erasing()](https://augmentor.readthedocs.io/en/stable/code.html#Augmentor.Pipeline.Pipeline.random_erasing) documentation for usage. 
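As a quick, illustrative sketch (treat the exact values as placeholders and check the documentation linked above for the definitive parameter names), Random Erasing is added to a pipeline like any other operation:

```python
p = Augmentor.Pipeline("/path/to/images")
# occlude a randomly placed rectangle covering roughly 20% of the image area,
# on half of the images passing through the pipeline
p.random_erasing(probability=0.5, rectangle_area=0.2)
p.sample(100)
```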
### Chaining Operations in a Pipeline With only a few operations, a single image can be augmented to produce large numbers of new, label-preserving samples: | Original image | Distortions + mirroring | |----------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------| | ![Original](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/eight_200px.png) | ![DistortFlipFlop](https://raw.githubusercontent.com/mdbloice/AugmentorFiles/master/UsageGuide/flip_distort.gif) | In the example above, we have applied three operations: first we randomly distort the image, then we flip it horizontally with a probability of 0.5 and then vertically with a probability of 0.5. We then sample from this pipeline 100 times to create 100 new data. ```python p.random_distortion(probability=1, grid_width=4, grid_height=4, magnitude=8) p.flip_left_right(probability=0.5) p.flip_top_bottom(probability=0.5) p.sample(100) ``` ## Tutorial Notebooks ### Integration with Keras using Generators Augmentor can be used as a replacement for Keras' augmentation functionality. Augmentor can create a generator which produces augmented data indefinitely, according to the pipeline you have defined. See the following notebooks for details: - Reading images from a local directory, augmenting them at run-time, and using a generator to pass the augmented stream of images to a Keras convolutional neural network, see [`Augmentor_Keras.ipynb`](https://github.com/mdbloice/Augmentor/blob/master/notebooks/Augmentor_Keras.ipynb) - Augmenting data in-memory (in array format) and using a generator to pass these new images to the Keras neural network, see [`Augmentor_Keras_Array_Data.ipynb`](https://github.com/mdbloice/Augmentor/blob/master/notebooks/Augmentor_Keras_Array_Data.ipynb) ### Per-Class Augmentation Strategies Augmentor allows for pipelines to be defined per class. That is, you can define different augmentation strategies on a class-by-class basis for a given classification problem. See an example of this in the following Jupyter notebook: [`Per_Class_Augmentation_Strategy.ipynb`](https://github.com/mdbloice/Augmentor/blob/master/notebooks/Per_Class_Augmentation_Strategy.ipynb) ## Complete Example Let's perform an augmentation task on a single image, demonstrating the pipeline and several features of Augmentor. 
First import the package and initialise a Pipeline object by pointing it to a directory containing your images: ```python import Augmentor p = Augmentor.Pipeline("/home/user/augmentor_data_tests") ``` Now you can begin adding operations to the pipeline object: ```python p.rotate90(probability=0.5) p.rotate270(probability=0.5) p.flip_left_right(probability=0.8) p.flip_top_bottom(probability=0.3) p.crop_random(probability=1, percentage_area=0.5) p.resize(probability=1.0, width=120, height=120) ``` Once you have added the operations you require, you can sample images from this pipeline: ```python p.sample(100) ``` Some sample output: | Input Image<sup>[3]</sup> | | Augmented Images | |--------------------------------------------------------------------------------------------------------------------|---|---------------------------------------------------------------------------------------------------------------------| | ![Original](https://cloud.githubusercontent.com/assets/16042756/23019262/b696e3a6-f441-11e6-958d-17f18f2cd35e.jpg) | → | ![Augmented](https://cloud.githubusercontent.com/assets/16042756/23018832/cda6967e-f43f-11e6-9082-765c291f1fd6.gif) | The augmented images may be useful for a boundary detection task, for example. ## Licence and Acknowledgements Augmentor is made available under the terms of the MIT Licence. See [`Licence.md`](https://github.com/mdbloice/Augmentor/blob/master/LICENSE.md). [1] Checkerboard image obtained from Wikimedia Commons and is in the public domain: <https://commons.wikimedia.org/wiki/File:Checkerboard_pattern.svg> [2] Street view image is in the public domain: <http://stokpic.com/project/italian-city-street-with-shoppers/> [3] Skin lesion image obtained from the ISIC Archive: - Image id = 5436e3abbae478396759f0cf - Download: <https://isic-archive.com:443/api/v1/image/5436e3abbae478396759f0cf/download> You can use `urllib` to obtain the skin lesion image in order to reproduce the augmented images above: ```python >>> from urllib import urlretrieve >>> im_url = "https://isic-archive.com:443/api/v1/image/5436e3abbae478396759f0cf/download" >>> urlretrieve(im_url, "ISIC_0000000.jpg") ('ISIC_0000000.jpg', <httplib.HTTPMessage instance at 0x7f7bd949a950>) ``` Note: For Python 3, use `from urllib.request import urlretrieve`. Logo created at [LogoMakr.com](https://logomakr.com) ## Tests To run the automated tests, clone the repository and run: ```bash $ py.test -v ``` from the command line. To view the CI tests that are run after each commit, see <https://travis-ci.org/mdbloice/Augmentor>. ## Asciicast Click the preview below to view a video demonstration of Augmentor in use: [![asciicast](https://asciinema.org/a/105368.png)](https://asciinema.org/a/105368?autoplay=1&speed=3)
PypiClean
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/code_generation/templates/CodeTemplatesFrames.py
template_frame_guard_normal_main_block = """\ {% if frame_cache_identifier %} if (isFrameUnusable({{frame_cache_identifier}})) { Py_XDECREF({{frame_cache_identifier}}); #if _DEBUG_REFCOUNTS if ({{frame_cache_identifier}} == NULL) { count_active_frame_cache_instances += 1; } else { count_released_frame_cache_instances += 1; } count_allocated_frame_cache_instances += 1; #endif {{frame_cache_identifier}} = {{make_frame_code}}; #if _DEBUG_REFCOUNTS } else { count_hit_frame_cache_instances += 1; #endif } assert({{frame_cache_identifier}}->m_type_description == NULL); {{frame_identifier}} = {{frame_cache_identifier}}; {% else %} {{frame_identifier}} = {{make_frame_code}}; {% endif %} {% if frame_init_code %} {{frame_init_code}} {% endif %} {% if context_identifier %} {% if frame_cache_identifier %} // Mark the frame object as in use, ref count 1 will be up for reuse. Py_INCREF({{context_identifier}}->m_frame); assert(Py_REFCNT({{context_identifier}}->m_frame) == 2); // Frame stack {% endif %} {% if is_python34_or_later %} Nuitka_SetFrameGenerator({{context_identifier}}->m_frame, (PyObject *){{context_identifier}}); {% endif %} assert({{context_identifier}}->m_frame->m_frame.f_back == NULL); {% endif %} // Push the new frame as the currently active one, and we should be exclusively // owning it. {% if context_identifier %} pushFrameStackGeneratorCompiledFrame({{frame_identifier}}); {% else %} pushFrameStackCompiledFrame({{frame_identifier}}); {% endif %} assert(Py_REFCNT({{frame_identifier}}) == 2); {% if context_identifier and is_python3 %} // Store currently existing exception as the one to publish again when we // yield or yield from. STORE_{{context_identifier.upper()}}_EXCEPTION({{context_identifier}}); {% endif %} // Framed code: {{codes}} {% if context_identifier and is_python3 %} // Release exception attached to the frame DROP_{{context_identifier.upper()}}_EXCEPTION({{context_identifier}}); {% endif %} {% if needs_preserve %} // Restore frame exception if necessary. RESTORE_FRAME_EXCEPTION({{frame_identifier}}); {% endif %} {% if not context_identifier %} // Put the previous frame back on top. popFrameStack(); {% endif %} {% if frame_exit_code %} {{frame_exit_code}} {% endif %} goto {{no_exception_exit}}; """ template_frame_guard_normal_return_handler = """\ {{frame_return_exit}}: {% if needs_preserve %} RESTORE_FRAME_EXCEPTION({{frame_identifier}}); {% endif %} // Put the previous frame back on top. popFrameStack(); {% if frame_exit_code %} {{frame_exit_code}}s {% endif %} goto {{return_exit}}; """ template_frame_attach_locals = """\ Nuitka_Frame_AttachLocals( %(frame_identifier)s, %(type_description)s%(frame_variable_refs)s ); """ template_frame_guard_normal_exception_handler = """\ {{frame_exception_exit}}: {% if needs_preserve %} RESTORE_FRAME_EXCEPTION({{frame_identifier}}); {% endif %} if ({{exception_tb}} == NULL) { {{exception_tb}} = {{tb_making_code}}; } else if ({{exception_tb}}->tb_frame != &{{frame_identifier}}->m_frame) { {{exception_tb}} = ADD_TRACEBACK({{exception_tb}}, {{frame_identifier}}, {{exception_lineno}}); } {% if attach_locals_code %} // Attaches locals to frame if any. {{attach_locals_code}} {% endif %} {% if frame_cache_identifier %} // Release cached frame if used for exception. 
if ({{frame_identifier}} == {{frame_cache_identifier}}) { #if _DEBUG_REFCOUNTS count_active_frame_cache_instances -= 1; count_released_frame_cache_instances += 1; #endif Py_DECREF({{frame_cache_identifier}}); {{frame_cache_identifier}} = NULL; } {% endif %} assertFrameObject({{frame_identifier}}); // Put the previous frame back on top. popFrameStack(); {% if frame_exit_code %} {{frame_exit_code}} {% endif %} // Return the error. goto {{parent_exception_exit}}; """ # Coroutines and asyncgen do this template_frame_guard_generator_return_handler = """\ %(frame_return_exit)s:; #if PYTHON_VERSION >= 0x300 #if PYTHON_VERSION < 0x3b0 Py_CLEAR(EXC_TYPE_F(%(context_identifier)s)); #endif Py_CLEAR(EXC_VALUE_F(%(context_identifier)s)); #if PYTHON_VERSION < 0x3b0 Py_CLEAR(EXC_TRACEBACK_F(%(context_identifier)s)); #endif #endif goto %(return_exit)s; """ template_frame_guard_generator_exception_handler = """\ %(frame_exception_exit)s:; // If it's not an exit exception, consider and create a traceback for it. if (!EXCEPTION_MATCH_GENERATOR(%(exception_type)s)) { if (%(exception_tb)s == NULL) { %(exception_tb)s = %(tb_making)s; } else if (%(exception_tb)s->tb_frame != &%(frame_identifier)s->m_frame) { %(exception_tb)s = ADD_TRACEBACK(%(exception_tb)s, %(frame_identifier)s, %(exception_lineno)s); } %(attach_locals)s // Release cached frame if used for exception. if (%(frame_identifier)s == %(frame_cache_identifier)s) { #if _DEBUG_REFCOUNTS count_active_frame_cache_instances -= 1; count_released_frame_cache_instances += 1; #endif Py_DECREF(%(frame_cache_identifier)s); %(frame_cache_identifier)s = NULL; } assertFrameObject(%(frame_identifier)s); } #if PYTHON_VERSION >= 0x300 #if PYTHON_VERSION < 0x3b0 Py_CLEAR(EXC_TYPE_F(%(context_identifier)s)); #endif Py_CLEAR(EXC_VALUE_F(%(context_identifier)s)); #if PYTHON_VERSION < 0x3b0 Py_CLEAR(EXC_TRACEBACK_F(%(context_identifier)s)); #endif #endif // Return the error. goto %(parent_exception_exit)s; """ from . import TemplateDebugWrapper # isort:skip TemplateDebugWrapper.checkDebug(globals())
PypiClean
/MSM_PELE-1.1.1-py3-none-any.whl/AdaptivePELE/AdaptivePELE/clustering/clustering.py
from __future__ import absolute_import, division, print_function, unicode_literals from builtins import range import sys import glob import numpy as np import os import pickle from six import reraise as raise_ from AdaptivePELE.constants import blockNames from AdaptivePELE.utilities import utilities from AdaptivePELE.atomset import SymmetryContactMapEvaluator as sym from AdaptivePELE.atomset import RMSDCalculator from AdaptivePELE.atomset import atomset from AdaptivePELE.clustering import clusteringTypes from AdaptivePELE.clustering import thresholdcalculator from scipy import stats import heapq try: import networkx as nx NETWORK = True except ImportError: NETWORK = False class Clusters: def __init__(self): self.clusters = [] def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"clusters": self.clusters} return state def __setstate__(self, state): # Restore instance attributes self.clusters = state['clusters'] def __len__(self): return len(self.clusters) def addCluster(self, cluster): """ Add a new cluster :param cluster: Cluster object to insert :type cluster: :py:class:`.Cluster` """ self.clusters.append(cluster) def insertCluster(self, index, cluster): """ Insert a cluster in a specified index :param index: Positions at which insert the cluster :type index: int :param cluster: Cluster object to insert :type cluster: :py:class:`.Cluster` """ self.clusters.insert(index, cluster) def getNumberClusters(self): """ Get the number of clusters contained :returns: int -- Number of clusters contained """ return len(self.clusters) def getCluster(self, clusterNum): """ Get the cluster at position clusterNum :param clusterNum: Index of the cluster to retrieve :type clusterNum: int :returns: :py:class:`.Cluster` -- Cluster at position clusterNum """ return self.clusters[clusterNum] def printClusters(self, verbose=False): """ Print clusters information :param verbose: Flag to control the verbosity of the code (default is False) :type verbose: bool """ for i, cluster in enumerate(self.clusters): print("--------------") print("CLUSTER #%d" % i) print("--------------") cluster.printCluster(verbose) print("") def __getitem__(self, key): return self.clusters[key] def __setitem__(self, key, value): self.clusters[key] = value def __delitem__(self, key): del self.clusters[key] def __eq__(self, other): return self.clusters == other.clusters def __iter__(self): for cluster in self.clusters: yield cluster class ConformationNetwork: """ Object that contains the conformation network, a network with clusters as nodes and edges representing trantions between clusters. The network is stored using the networkx package[1] References ---------- .. 
[1] Networkx python package https://networkx.github.io """ def __init__(self): if NETWORK: self.network = nx.DiGraph() else: self.network = None def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"network": self.network} return state def __setstate__(self, state): # Restore instance attributes self.network = state['network'] if NETWORK and int(nx.__version__.split(".")[0]) > 1 and 'adj' in self.network.__dict__: for attr in ['node', 'adj', 'graph', 'pred', 'succ']: self.network.__dict__["_"+attr] = self.network.__dict__.pop(attr) def add_node(self, node, **kwargs): """ Add a node to the network (wrapper for networkx method) :param node: Name of the node :type node: int :param kwargs: Set or change attributes using key=value. :type kwargs: keyword arguments, optional """ if not NETWORK: return self.network.add_node(node, attr_dict=kwargs) def add_edge(self, source, target): """ Add an edge to the network (wrapper for networkx method) :param source: Name of the source node :type source: int :param target: Name of the target node :type target: int """ if not NETWORK: return if self.network.has_edge(source, target): self.network[source][target]['transition'] += 1 else: self.network.add_edge(source, target, transition=1) def writeConformationNetwork(self, path): """ Write the conformational network to file to visualize it :param path: Path where to write the network :type path: str """ if not NETWORK: sys.stderr.write("Package networkx not found! Could not write network\n") return nx.write_edgelist(self.network, path) def writeFDT(self, path): """ Write the first discovery tree to file in edgelist format to visualize it :param path: Path where to write the network :type path: str """ if not NETWORK: sys.stderr.write("Package networkx not found! Could not write network\n") return with open(path, "w") as fw: for node, data in self.network.nodes(data=True): if data['parent'] != 'root': fw.write("%d\t%d\n" % (data['parent'], node)) def createPathwayToCluster(self, clusterLeave): """ Retrace the FDT from a specific cluster to the root where it was discovered :param clusterLeave: End point of the pathway to reconstruct :type clusterLeave: int :returns: list -- List of snapshots conforming a pathway """ pathway = [] nodeLabel = clusterLeave while nodeLabel != "root": pathway.append(nodeLabel) nodeLabel = self.network.node[nodeLabel]['parent'] return pathway[::-1] class AltStructures: """ Helper class, each cluster will have an instance of AltStructures that will maintain a priority queue (pq) of alternative structures to spawn from encoded as tuples (priority, PDB). 
""" def __init__(self): self.altStructPQ = [] self.limitSize = 10 self.index = -1 def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"altStructPQ": self.altStructPQ, "limitSize": self.limitSize, "index": self.index} return state def __setstate__(self, state): # Restore instance attributes self.limitSize = state['limitSize'] self.altStructPQ = [(el[0], i, el[-1]) for i, el in enumerate(state['altStructPQ'])] self.index = state.get('index', len(self.altStructPQ)-1) def altSpawnSelection(self, centerPair): """ Select an alternative PDB from the cluster center to spawn from :param centerPair: Tuple with the population of the representative structure and the PDB of said structure :type centerPair: int, :py:class:`.PDB` :returns: :py:class:`.PDB`, tuple -- PDB of the strucutre selected to spawn and tuple consisting of (epoch, trajectory, snapshot) """ subpopulations = [i[0] for i in self.altStructPQ] totalSubpopulation = sum(subpopulations) # Create a list of the population distributed between the cluster # center and the alternative structures weights = 1.0/np.array([centerPair[0]-totalSubpopulation]+subpopulations) weights /= weights.sum() # This function only works on numpy >= 1.7, on life we have 1.6 # ind = np.random.choice(range(len(self.altStructPQ)), p=weights) # Add one to account for the cluster representative r = stats.rv_discrete(values=(range(self.sizePQ()+1), weights)) ind = r.rvs(size=10)[0] # The first value of the distribution is always the cluster center if ind == 0: print("cluster center") return centerPair[1], None else: # pick an alternative structure from the priority queue # The first element corresponds to the cluster center ind -= 1 print("alternative structure") return self.altStructPQ[ind][2].pdb, self.altStructPQ[ind][2].trajPosition def cleanPQ(self): """ Ensure that the alternative structures priority queue has no more elements than the limit in order to ensure efficiency """ if len(self.altStructPQ) < self.limitSize: return limit = len(self.altStructPQ) del self.altStructPQ[self.limitSize-limit:] def addStructure(self, PDB, threshold, resname, resnum, resChain, contactThreshold, similarityEvaluator, trajPosition): """ Perform a subclustering, with sub-clusters of size threshold/2 :param PDB: Structure to cluster :type PDB: :py:class:`.PDB` :param threshold: Size of the cluster :type threshold: float :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param contactThreshold: Distance at which to atoms are considered in contact :type contactThreshold: float :param similarityEvaluator: Object that determinates the similarity between two structures :type similarityEvaluator: :py:class:`.SimilarityEvaluator` :param trajPosition: Tuple of (epoch, trajectory, snapshot) that permit identifying the structure added :type trajPosition: int, int, int """ i = 0 for _, _, subCluster in self.altStructPQ: _, distance = similarityEvaluator.isElement(PDB, subCluster, resname, resnum, resChain, contactThreshold) if distance < subCluster.threshold/2.0: subCluster.addElement([]) del self.altStructPQ[i] heapq.heappush(self.altStructPQ, (subCluster.elements, self.updateIndex(), subCluster)) if len(self.altStructPQ) > 
2*self.limitSize: self.cleanPQ() return i += 1 newCluster = Cluster(PDB, thresholdRadius=threshold, contactThreshold=contactThreshold, contactMap=similarityEvaluator.contactMap, trajPosition=trajPosition) heapq.heappush(self.altStructPQ, (1, self.updateIndex(), newCluster)) if len(self.altStructPQ) > 2*self.limitSize: self.cleanPQ() def updateIndex(self): """ Update the index which represents chronological order of entries in the priority queue :returns: int -- Index of the following element """ self.index += 1 return self.index def sizePQ(self): """ Get the number of sub-clusters stored in the priority queue :returns: int -- Number of sub-clusters stored in the priority queue """ return len(self.altStructPQ) class Cluster: """ A cluster contains a representative structure(pdb), the number of elements, its density, threshold, number of contacts, a contactMap(sometimes) and a metric """ def __init__(self, pdb, thresholdRadius=None, contactMap=None, contacts=None, metrics=None, metricCol=None, density=None, contactThreshold=8, altSelection=False, trajPosition=None): """ :param pdb: Pdb of the representative structure :type pdb: :py:class:`.PDB` :param thresholdRadius: Threshold of the cluster :type thresholdRadius: float :param contactMap: The contact map of the ligand and the protein :type contactMap: numpy.Array :param contacts: Ratio of the number of alpha carbons in contact with the ligand :type contacts: float :param metrics: Array of the metrics corresponding to the cluster :type metrics: numpy.Array :param metricCol: Column of the prefered metric :type metricCol: int :param density: Density of the cluster :type density: float :param contactThreshold: Distance between two atoms to be considered in contact (default 8) :type contactThreshold: float :param altSelection: Flag that controls wether to use the alternative structures (default 8) :type altSelection: bool :param trajPosition: Tuple of (epoch, trajectory, snapshot) that permit identifying the structure added :type trajPosition: int, int, int """ self.pdb = pdb self.altStructure = AltStructures() self.elements = 1 self.threshold = thresholdRadius self.density = density self.contacts = contacts self.contactMap = contactMap if metrics is None: metrics = [] self.metrics = metrics self.originalMetrics = metrics self.metricCol = metricCol self.contactThreshold = contactThreshold self.altSelection = altSelection self.trajPosition = trajPosition if self.threshold is None: self.threshold2 = None else: self.threshold2 = thresholdRadius*thresholdRadius def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"pdb": self.pdb, "altStructure": self.altStructure, "elements": self.elements, "threshold": self.threshold, "densitiy": self.density, "contacts": self.contacts, "contactMap": self.contactMap, "metrics": self.metrics, "metricCol": self.metricCol, "threshold2": self.threshold2, "contactThreshold": self.contactThreshold, "altSelection": self.altSelection, "originalMetrics": self.originalMetrics, "trajPosition": self.trajPosition} return state def __setstate__(self, state): # Restore instance attributes self.pdb = state['pdb'] self.altStructure = state.get('altStructure', AltStructures()) self.elements = state['elements'] self.threshold = state.get('threshold') self.density = state.get('density') self.contacts = state.get('contacts') self.contactMap = state.get('contactMap') self.metrics = state.get('metrics', []) 
self.originalMetrics = state.get('originalMetrics', []) self.metricCol = state.get('metricCol') self.threshold2 = state.get('threshold2') self.contactThreshold = state.get('contactThreshold', 8) self.altSelection = state.get('altSelection', False) self.trajPosition = state.get('trajPosition') def __len__(self): return self.elements def getMetric(self): """ Get the value of the prefered metric if present, otherwise return None :returns: float -- Value of the prefered metric """ if len(self.metrics) and self.metricCol is not None: return self.metrics[self.metricCol] else: return None def getMetricFromColumn(self, numcol): """ Get the value of the metric in column numcol if present, otherwise return None :param numcol: Column of the desired metric :type numcol: int :returns: float -- Value of the prefered metric """ if len(self.metrics): return self.metrics[numcol] else: return None def addElement(self, metrics): """ Add a new element to the cluster :param metrics: Array of metrics of the new structure :type metrics: numpy.Array """ self.elements += 1 if self.metrics is None: # Special case where cluster in created during clustering of # initial structures self.metrics = metrics return if self.originalMetrics is None: self.originalMetrics = metrics if len(metrics) and len(self.metrics): # Set all metrics to the minimum value self.metrics = np.minimum(self.metrics, metrics) def printCluster(self, verbose=False): """ Print cluster information :param verbose: Flag to control the verbosity of the code (default is False) :type verbose: bool """ if verbose: print(self.pdb.printAtoms()) print("Elements: ", self.elements) print("Metrics: ", self.metrics) if self.threshold != 0: print("Radius threshold: ", self.threshold) print("Number of contacts: %.2f" % self.contacts) def __str__(self): return "Cluster: elements=%d, threshold=%.3f, contacts=%.3f, density=%.3f" % (self.elements, self.threshold, self.contacts, self.density or 0.000) def writePDB(self, path): """ Write the pdb of the representative structure to file :param path: Filename of the file to write :type path: str """ if topology is None: topology = [] self.pdb.writePDB(str(path)) def getContacts(self): """ Get the contacts ratio of the cluster :returns: float -- contact ratio of the cluster """ return self.contacts def writeSpawningStructure(self, path): """ Write the pdb of the chosen structure to spawn :param path: Filename of the file to write :type path: str :returns int, int, int: Tuple of (epoch, trajectory, snapshot) that permit identifying the structure added """ if not self.altSelection or self.altStructure.sizePQ() == 0: print("cluster center") self.pdb.writePDB(str(path)) return self.trajPosition else: spawnStruct, trajPosition = self.altStructure.altSpawnSelection((self.elements, self.pdb)) spawnStruct.writePDB(str(path)) if trajPosition is None: trajPosition = self.trajPosition return trajPosition def __eq__(self, other): return self.pdb == other.pdb\ and self.elements == other.elements\ and self.threshold == other.threshold\ and self.contacts == other.contacts\ and np.allclose(self.metrics, other.metrics) class ClusteringEvaluator: def __init__(self): self.contactMap = None self.contacts = None def cleanContactMap(self): """ Clean the attributes to prepare for next iteration """ self.contactMap = None self.contacts = None class ContactsClusteringEvaluator(ClusteringEvaluator): def __init__(self, RMSDCalculator_object): """ Helper object to carry out the RMSD clustering :param RMSDCalculator: object that calculates the RMSD 
between two conformations :type RMSDCalculator: :py:class:`.RMSDCalculator` """ ClusteringEvaluator.__init__(self) self.RMSDCalculator = RMSDCalculator_object self.contacts = None # Only here for compatibility purpose self.contactMap = None def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"RMSDCalculator": self.RMSDCalculator, "contacts": self.contacts, "contactMap": self.contactMap} return state def __setstate__(self, state): # Restore instance attributes self.RMSDCalculator = state.get('RMSDCalculator', RMSDCalculator.RMSDCalculator()) self.contacts = state.get('contacts') self.contactMap = state.get('contactMap') def isElement(self, pdb, cluster, resname, resnum, resChain, contactThresholdDistance): """ Evaluate wether a conformation is a member of a cluster :param pdb: Structure to compare :type pdb: :py:class:`.PDB` :param cluster: Cluster to compare :type cluster: :py:class:`.Cluster` :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param contactThreshold: Distance between two atoms to be considered in contact (default 8) :type contactThreshold: float :returns: bool, float -- Whether the structure belong to the cluster and the distance between them """ dist = self.RMSDCalculator.computeRMSD(cluster.pdb, pdb) return dist < cluster.threshold, dist def checkAttributes(self, pdb, resname, resnum, resChain, contactThresholdDistance): """ Check wether all attributes are set for this iteration :param pdb: Structure to compare :type pdb: :py:class:`.PDB` :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param contactThreshold: Distance between two atoms to be considered in contact (default 8) :type contactThreshold: float """ if self.contacts is None: self.contacts = pdb.countContacts(resname, contactThresholdDistance, resnum, resChain) def getInnerLimit(self, cluster): """ Return the threshold of the cluster :param cluster: Cluster to compare :type cluster: :py:class:`.Cluster` :returns: float -- Threshold of the cluster """ return cluster.threshold2 class CMClusteringEvaluator(ClusteringEvaluator): limitSlope = {8: 6, 6: 15, 4: 60, 10: 3} limitMax = {8: 2, 6: 0.8, 4: 0.2, 10: 4} def __init__(self, similarityEvaluator, symmetryEvaluator): """ Helper object to carry out the RMSD clustering :param similarityEvaluator: object that calculates the similarity between two contact maps :type similarityEvaluator: :py:class:`.CMSimilarityEvaluator` :param symmetryEvaluator: object to introduce the symmetry in the contacts maps :type symmetryEvaluator: :py:class:`.SymmetryContactMapEvaluator` """ ClusteringEvaluator.__init__(self) self.similarityEvaluator = similarityEvaluator self.symmetryEvaluator = symmetryEvaluator self.contacts = None self.contactMap = None def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"similarityEvaluator": self.similarityEvaluator, 
"symmetryEvaluator": self.symmetryEvaluator, "contacts": self.contacts, "contactMap": self.contactMap} return state def __setstate__(self, state): # Restore instance attributes self.similarityEvaluator = state.get('similarityEvaluator') self.symmetryEvaluator = state.get('symmetryEvaluator') self.contacts = state.get('contacts') self.contactMap = state.get('contactMap') def isElement(self, pdb, cluster, resname, resnum, resChain, contactThresholdDistance): """ Evaluate wether a conformation is a member of a cluster :param pdb: Structure to compare :type pdb: :py:class:`.PDB` :param cluster: Cluster to compare :type cluster: :py:class:`.Cluster` :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param contactThreshold: Distance between two atoms to be considered in contact (default 8) :type contactThreshold: float :returns: bool, float -- Whether the structure belong to the cluster and the distance between them """ if self.contactMap is None: self.contactMap, self.contacts = self.symmetryEvaluator.createContactMap(pdb, resname, contactThresholdDistance, resnum, resChain) # self.contactMap, foo = self.symmetryEvaluator.createContactMap(pdb, resname, contactThresholdDistance) # self.contacts = pdb.countContacts(resname, 8) # contactThresholdDistance) distance = self.similarityEvaluator.isSimilarCluster(self.contactMap, cluster.contactMap, self.symmetryEvaluator) return distance < cluster.threshold, distance def checkAttributes(self, pdb, resname, resnum, resChain, contactThresholdDistance): """ Check wether all attributes are set for this iteration :param pdb: Structure to compare :type pdb: :py:class:`.PDB` :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param contactThreshold: Distance between two atoms to be considered in contact (default 8) :type contactThreshold: float """ if self.contactMap is None: self.contactMap, self.contacts = self.symmetryEvaluator.createContactMap(pdb, resname, contactThresholdDistance, resnum, resChain) # self.contactMap, foo = self.symmetryEvaluator.createContactMap(pdb, resname, contactThresholdDistance) # self.contacts = pdb.countContacts(resname, 8) # contactThresholdDistance) def getInnerLimit(self, cluster): """ Return the threshold of the cluster :param cluster: Cluster to compare :type cluster: :py:class:`.Cluster` :returns: float -- Threshold of the cluster """ # if cluster.contacts > self.limitMax[cluster.contactThreshold]: # return 4.0 # else: # return 16-self.limitSlope[cluster.contactThreshold]*cluster.contacts # if cluster.contacts > 2.0: # return 4.0 # elif cluster.contacts <= 0.5: # return 25.0 # else: # return 25-14*(cluster.contacts-0.5) if cluster.contacts > 2.0: return 4.0 elif cluster.contacts < 0.5: return 25.0 else: return 16-8*(cluster.contacts-0.5) # if cluster.contacts > 1.0: # return 4.0 # elif cluster.contacts > 0.75: # return 9.0 # elif cluster.contacts > 0.5: # return 16.0 # else: # return 25 class Clustering: def __init__(self, resname="", resnum=0, resChain="", reportBaseFilename=None, columnOfReportFile=None, contactThresholdDistance=8, 
altSelection=False): """ Base class for clustering methods, it defines a cluster method that contacts and accumulative inherit and use :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param reportBaseFilename: Name of the file that contains the metrics of the snapshots to cluster :type reportBaseFilename: str :param columnOfReportFile: Column of the report file that contain the metric of interest :type columnOfReportFile: int :param contactThresholdDistance: Distance at wich a ligand atom and a protein atom are considered in contact(default 8) :type contactThresholdDistance: float """ self.type = "BaseClass" self.clusters = Clusters() if reportBaseFilename: self.reportBaseFilename = reportBaseFilename + "_%d" else: self.reportBaseFilename = None self.resname = resname self.resnum = resnum self.resChain = resChain self.col = columnOfReportFile self.contactThresholdDistance = contactThresholdDistance self.symmetries = [] self.altSelection = altSelection self.conformationNetwork = ConformationNetwork() self.epoch = -1 def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"type": self.type, "clusters": self.clusters, "reportBaseFilename": self.reportBaseFilename, "resname": self.resname, "resnum": self.resnum, "resChain": self.resChain, "col": self.col, "epoch": self.epoch, "symmetries": self.symmetries, "conformationNetwork": self.conformationNetwork, "contactThresholdDistance": self.contactThresholdDistance, "altSelection": self.altSelection} return state def __setstate__(self, state): # Restore instance attributes self.type = state['type'] self.clusters = state['clusters'] self.reportBaseFilename = state.get('reportBaseFilename') self.resname = state.get('resname', "") self.resnum = state.get('resnum', 0) self.resChain = state.get('resChain', "") self.col = state.get('col') self.contactThresholdDistance = state.get('contactThresholdDistance', 8) self.symmetries = state.get('symmetries', []) self.altSelection = state.get('altSelection', False) self.conformationNetwork = state.get('conformationNetwork', ConformationNetwork()) self.epoch = state.get('epoch', -1) def __str__(self): return "Clustering: nClusters: %d" % len(self.clusters) def __len__(self): return len(self.clusters) def __iter__(self): for cluster in self.clusters: yield cluster def __getitem__(self, key): return self.clusters[key] def __setitem__(self, key, value): self.clusters[key] = value def __delitem__(self, key): del self.clusters[key] def setCol(self, col): """ Set the column of the prefered column to col :param col: Column of the prefered column :type col: int """ self.col = col for cluster in self.clusters.clusters: cluster.metricCol = col def getCluster(self, clusterNum): """ Get the cluster at index clusterNum :returns: :py:class:`.Cluster` -- Cluster at clusterNum """ return self.clusters.getCluster(clusterNum) def emptyClustering(self): """ Delete previous results of clustering object """ self.clusters = Clusters() self.conformationNetwork = ConformationNetwork() self.epoch = -1 def clusterIterator(self): """ Iterator over the clusters """ # TODO: may be interesting to add some condition to filter, check # itertools module, its probably implemented 
for cluster in self.clusters.clusters: yield cluster def getNumberClusters(self): """ Get the number of clusters :returns: int -- Number of clusters """ return self.clusters.getNumberClusters() def __eq__(self, other): return self.clusters == other.clusters\ and self.reportBaseFilename == other.reportBaseFilename\ and self.resname == other.resname\ and self.resnum == other.resnum\ and self.resChain == other.resChain\ and self.col == other.col def cluster(self, paths, ignoreFirstRow=False, topology=None): """ Cluster the snaptshots contained in the paths folder :param paths: List of folders with the snapshots :type paths: list :param topology: Topology file for non-pdb trajectories :type topology: str """ self.epoch += 1 if topology is None: topology_contents = None else: topology_contents = utilities.getTopologyFile(topology) trajectories = getAllTrajectories(paths) for trajectory in trajectories: trajNum = utilities.getTrajNum(trajectory) # origCluster = processorsToClusterMapping[trajNum-1] origCluster = None snapshots = utilities.getSnapshots(trajectory, True, topology=topology) if self.reportBaseFilename: reportFilename = os.path.join(os.path.split(trajectory)[0], self.reportBaseFilename % trajNum) metrics = np.loadtxt(reportFilename, ndmin=2) for num, snapshot in enumerate(snapshots): if ignoreFirstRow and num == 0: continue try: origCluster = self.addSnapshotToCluster(trajNum, snapshot, origCluster, num, metrics[num], self.col, topology=topology_contents) except IndexError as e: message = (" in trajectory %d. This is usually caused by a mismatch between report files and trajectory files" " which in turn is usually caused by some problem in writing the files, e.g. quota") # raise a new exception of the same type, with the same # traceback but with an added message raise_(IndexError, (str(e) + message % trajNum), sys.exc_info()[2]) else: for num, snapshot in enumerate(snapshots): if ignoreFirstRow and num == 0: continue origCluster = self.addSnapshotToCluster(trajNum, snapshot, origCluster, num, topology=topology_contents) for cluster in self.clusters.clusters: cluster.altStructure.cleanPQ() def writeOutput(self, outputPath, degeneracy, outputObject, writeAll): """ Writes all the clustering information in outputPath :param outputPath: Folder that will contain all the clustering information :type outputPath: str :param degeneracy: Degeneracy of each cluster. 
It must be in the same order as in the self.clusters list :type degeneracy: list :param outputObject: Output name for the pickle object :type outputObject: str :param writeAll: Wether to write pdb files for all cluster in addition of the summary :type writeAll: bool """ utilities.cleanup(outputPath) utilities.makeFolder(outputPath) summaryFilename = os.path.join(outputPath, "summary.txt") with open(summaryFilename, 'w') as summaryFile: summaryFile.write("#cluster size degeneracy contacts threshold density metric\n") for i, cluster in enumerate(self.clusters.clusters): if writeAll: outputFilename = "cluster_%d.pdb" % i outputFilename = os.path.join(outputPath, outputFilename) cluster.writePDB(outputFilename) metric = cluster.getMetric() if metric is None: metric = "-" else: metric = "%.3f" % metric degeneracy_cluster = 0 if degeneracy is not None: # degeneracy will be None if null spawning is used degeneracy_cluster = degeneracy[i] writeString = "%d %d %d %.2f %.4f %.1f %s\n" % (i, cluster.elements, degeneracy_cluster, cluster.contacts, cluster.threshold, cluster.density or 1.0, metric) summaryFile.write(writeString) with open(outputObject, 'wb') as f: pickle.dump(self, f, 2) def addSnapshotToCluster(self, trajNum, snapshot, origCluster, snapshotNum, metrics=None, col=None, topology=None): """ Cluster a snapshot using the leader algorithm :param trajNum: Trajectory number :type trajNum: int :param snapshot: Snapshot to add :type snapshot: str :param origCluster: Cluster found in the previos snapshot :type origCluster: int :param snapshotNum: Number of snapshot in its trajectory :type snapshotNum: int :param metrics: Array with the metrics of the snapshot :type metrics: numpy.Array :param col: Column of the desired metrics :type col: int :returns: int -- Cluster to which the snapshot belongs :param topology: Topology for non-pdb trajectories :type topology: list """ if metrics is None: metrics = [] pdb = atomset.PDB() pdb.initialise(snapshot, resname=self.resname, resnum=self.resnum, chain=self.resChain, topology=topology) self.clusteringEvaluator.cleanContactMap() for clusterNum, cluster in enumerate(self.clusters.clusters): scd = atomset.computeSquaredCentroidDifference(cluster.pdb, pdb) if scd > self.clusteringEvaluator.getInnerLimit(cluster): continue isSimilar, dist = self.clusteringEvaluator.isElement(pdb, cluster, self.resname, self.resnum, self.resChain, self.contactThresholdDistance) if isSimilar: if dist > cluster.threshold/2.0: cluster.altStructure.addStructure(pdb, cluster.threshold, self.resname, self.resnum, self.resChain, self.contactThresholdDistance, self.clusteringEvaluator, trajPosition=(self.epoch, trajNum, snapshotNum)) cluster.addElement(metrics) if origCluster is None: origCluster = clusterNum self.conformationNetwork.add_edge(origCluster, clusterNum) return clusterNum # if made it here, the snapshot was not added into any cluster # Check if contacts and contactMap are set (depending on which kind # of clustering) self.clusteringEvaluator.checkAttributes(pdb, self.resname, self.resnum, self.resChain, self.contactThresholdDistance) contacts = self.clusteringEvaluator.contacts numberOfLigandAtoms = pdb.getNumberOfAtoms() contactsPerAtom = contacts/numberOfLigandAtoms threshold = self.thresholdCalculator.calculate(contactsPerAtom) cluster = Cluster(pdb, thresholdRadius=threshold, contacts=contactsPerAtom, contactMap=self.clusteringEvaluator.contactMap, metrics=metrics, metricCol=col, contactThreshold=self.contactThresholdDistance, altSelection=self.altSelection, 
trajPosition=(self.epoch, trajNum, snapshotNum)) self.clusters.addCluster(cluster) clusterNum = self.clusters.getNumberClusters()-1 if clusterNum == origCluster or origCluster is None: origCluster = clusterNum # The clusterNum should only be equal to origCluster when the first # cluster is created and the clusterInitialStructures function has # not been called, i.e. when usind the compareClustering script self.conformationNetwork.add_node(clusterNum, parent='root', epoch=self.epoch) else: self.conformationNetwork.add_node(clusterNum, parent=origCluster, epoch=self.epoch) self.conformationNetwork.add_edge(origCluster, clusterNum) # If a new cluster is discovered during a trajectory, the next step in # the same trajectory will be considered to start from these new # cluster, thus resulting in a more precise conformation network and # smoother pathways return clusterNum def writeClusterMetric(self, path, metricCol): """ Write the metric of each node in the conformation network in a tab-separated file :param path: Path where to write the network :type path: str :param metricCol: Column of the metric of interest :type metricCol: int """ with open(path, "w") as f: for i, cluster in enumerate(self.clusters.clusters): metric = cluster.getMetricFromColumn(metricCol) if metric is None: f.write("%d\t-\n" % i) else: f.write("%d\t%.4f\n" % (i, metric)) def writeConformationNodePopulation(self, path): """ Write the population of each node in the conformation network in a tab-separated file :param path: Path where to write the network :type path: str """ with open(path, "w") as f: for i, cluster in enumerate(self.clusters.clusters): f.write("%d\t%d\n" % (i, cluster.elements)) def getOptimalMetric(self, column=None, simulationType="min"): """ Find the cluster with the best metric :param column: Column of the metric that defines the best cluster, if not specified, the cluster metric is chosen :type column: int :param simulationType: Define optimal metric as the maximum or minimum, max or min :type simulationType: str :returns: int -- Number of cluster with the optimal metric """ metrics = [] for _, cluster in enumerate(self.clusters.clusters): if column is None: metric = cluster.getMetric() else: metric = cluster.getMetricFromColumn(column) metrics.append(metric) if simulationType.lower() == "min": optimalMetricIndex = np.argmin(metrics) elif simulationType.lower() == "max": optimalMetricIndex = np.argmax(metrics) else: raise ValueError("Unrecognized type simulation parameter!!! 
Possible values are max or min") return optimalMetricIndex def writePathwayTrajectory(self, pathway, filename, topology=None): """ Write a list of cluster forming a pathway into a trajectory pdb file :param pathway: List of clusters that form the pathway :type pathway: list :param filename: Path where to write the trajectory :type filename: str :param topology: Lines of topology file :type topology: list """ if topology is None: topology = [] with open(filename, "w") as pathwayFile: pathwayFile.write("REMARK 000 File created using PELE++\n") pathwayFile.write("REMARK 000 Pathway trajectory created using the FDT\n") pathwayFile.write("REMARK 000 List of cluster belonging to the pathway %s\n" % ' '.join(map(str, pathway))) for i, step_cluster in enumerate(pathway): cluster = self.clusters.clusters[step_cluster] pathwayFile.write("MODEL %d\n" % (i+1)) pdbStr = cluster.pdb.get_pdb_string() pdbList = pdbStr.split("\n") for line in pdbList: line = line.strip() # Avoid writing previous REMARK block if line.startswith("REMARK ") or line.startswith("MODEL ") or line == "END": continue elif line: pathwayFile.write(line+"\n") pathwayFile.write("ENDMDL\n") def writePathwayOptimalCluster(self, filename): """ Extract the pathway to the cluster with the best metric as a trajectory and write it to a PDB file :param filename: Path where to write the trajectory :type filename: str """ optimalCluster = self.getOptimalMetric() pathway = self.createPathwayToCluster(optimalCluster) self.writePathwayTrajectory(pathway, filename) class ContactsClustering(Clustering): def __init__(self, thresholdCalculator, resname="", resnum=0, resChain="", reportBaseFilename=None, columnOfReportFile=None, contactThresholdDistance=8, symmetries=None, altSelection=False): """ Cluster together all snapshots that are closer to the cluster center than certain threshold. 
This threshold is assigned according to the ratio of number of contacts over the number of heavy atoms of the ligand :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param thresholdCalculator: ThresholdCalculator object that calculate the threshold according to the contacts ratio :type thresholdCalculator: :py:class:`.ThresholdCalculator` :param reportBaseFilename: Name of the file that contains the metrics of the snapshots to cluster :type reportBaseFilename: str :param columnOfReportFile: Column of the report file that contain the metric of interest :type columnOfReportFile: int :param contactThresholdDistance: Distance at wich a ligand atom and a protein atom are considered in contact(default 8) :type contactThresholdDistance: float :param symmetries: List of symmetric groups :type symmetries: list :param altSelection: Flag that controls wether to use the alternative structures (default 8) :type altSelection: bool """ Clustering.__init__(self, resname=resname, resnum=resnum, resChain=resChain, reportBaseFilename=reportBaseFilename, columnOfReportFile=columnOfReportFile, contactThresholdDistance=contactThresholdDistance, altSelection=altSelection) self.type = clusteringTypes.CLUSTERING_TYPES.rmsd self.thresholdCalculator = thresholdCalculator if symmetries is None: symmetries = [] self.symmetries = symmetries self.clusteringEvaluator = ContactsClusteringEvaluator(RMSDCalculator.RMSDCalculator(symmetries)) def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"type": self.type, "clusters": self.clusters, "reportBaseFilename": self.reportBaseFilename, "resname": self.resname, "resnum": self.resnum, "resChain": self.resChain, "col": self.col, "epoch": self.epoch, "symmetries": self.symmetries, "conformationNetwork": self.conformationNetwork, "contactThresholdDistance": self.contactThresholdDistance, "altSelection": self.altSelection, "thresholdCalculator": self.thresholdCalculator, "clusteringEvaluator": self.clusteringEvaluator} return state def __setstate__(self, state): # Restore instance attributes self.type = state['type'] self.clusters = state['clusters'] self.reportBaseFilename = state.get('reportBaseFilename') self.resname = state.get('resname', "") self.resnum = state.get('resnum', 0) self.resChain = state.get('resChain', "") self.col = state.get('col') self.contactThresholdDistance = state.get('contactThresholdDistance', 8) self.symmetries = state.get('symmetries', []) self.altSelection = state.get('altSelection', False) self.conformationNetwork = state.get('conformationNetwork', ConformationNetwork()) self.epoch = state.get('epoch', -1) self.thresholdCalculator = state.get('thresholdCalculator', thresholdcalculator.ThresholdCalculatorConstant()) if isinstance(self.symmetries, dict): self.symmetries = [self.symmetries] self.clusteringEvaluator = state.get('clusteringEvaluator', ContactsClusteringEvaluator(RMSDCalculator.RMSDCalculator(self.symmetries))) class ContactMapAccumulativeClustering(Clustering): def __init__(self, thresholdCalculator, similarityEvaluator, resname="", resnum=0, resChain="", reportBaseFilename=None, columnOfReportFile=None, contactThresholdDistance=8, symmetries=None, altSelection=False): 
""" Cluster together all snapshots that have similar enough contactMaps. This similarity can be calculated with different methods (see similariyEvaluator documentation) :param thresholdCalculator: ThresholdCalculator object that calculate the threshold according to the contacts ratio :type thresholdCalculator: :py:class:`.ThresholdCalculator` :param similarityEvaluator: object that calculates the similarity between two contact maps :type similarityEvaluator: object :param resname: String containing the three letter name of the ligand in the pdb :type resname: str :param resnum: Integer containing the residue number of the ligand in the pdb :type resnum: int :param resChain: String containing the chain name of the ligand in the pdb :type resChain: str :param reportBaseFilename: Name of the file that contains the metrics of the snapshots to cluster :type reportBaseFilename: str :param columnOfReportFile: Column of the report file that contain the metric of interest :type columnOfReportFile: int :param contactThresholdDistance: Distance at wich a ligand atom and a protein atom are considered in contact(default 8) :type contactThresholdDistance: float :param symmetries: List of symmetric groups :type symmetries: list :param altSelection: Flag that controls wether to use the alternative structures (default 8) :type altSelection: bool """ if symmetries is None: symmetries = [] Clustering.__init__(self, resname=resname, resnum=resnum, resChain=resChain, reportBaseFilename=reportBaseFilename, columnOfReportFile=columnOfReportFile, contactThresholdDistance=contactThresholdDistance, altSelection=altSelection) self.type = clusteringTypes.CLUSTERING_TYPES.contactMap self.thresholdCalculator = thresholdCalculator self.similarityEvaluator = similarityEvaluator self.symmetryEvaluator = sym.SymmetryContactMapEvaluator(symmetries) self.clusteringEvaluator = CMClusteringEvaluator(similarityEvaluator, self.symmetryEvaluator) def __getstate__(self): # Defining pickling interface to avoid problems when working with old # simulations if the properties of the clustering-related classes have # changed state = {"type": self.type, "clusters": self.clusters, "reportBaseFilename": self.reportBaseFilename, "resname": self.resname, "resnum": self.resnum, "resChain": self.resChain, "col": self.col, "epoch": self.epoch, "symmetries": self.symmetries, "conformationNetwork": self.conformationNetwork, "contactThresholdDistance": self.contactThresholdDistance, "altSelection": self.altSelection, "thresholdCalculator": self.thresholdCalculator, "similariyEvaluator": self.similarityEvaluator, "symmetryEvaluator": self.symmetryEvaluator, "clusteringEvaluator": self.clusteringEvaluator} return state def __setstate__(self, state): # Restore instance attributes self.type = state['type'] self.clusters = state['clusters'] self.reportBaseFilename = state.get('reportBaseFilename') self.resname = state.get('resname', "") self.resnum = state.get('resnum', 0) self.resChain = state.get('resChain', "") self.col = state.get('col') self.contactThresholdDistance = state.get('contactThresholdDistance', 8) self.symmetries = state.get('symmetries', []) self.altSelection = state.get('altSelection', False) self.conformationNetwork = state.get('conformationNetwork', ConformationNetwork()) self.epoch = state.get('epoch', -1) self.thresholdCalculator = state.get('thresholdCalculator', thresholdcalculator.ThresholdCalculatorConstant(value=0.3)) self.similarityEvaluator = state.get('similariyEvaluator', 
CMSimilarityEvaluator(blockNames.ClusteringTypes.Jaccard)) self.symmetryEvaluator = state.get('symmetryEvaluator', sym.SymmetryContactMapEvaluator(self.symmetries)) self.clusteringEvaluator = state.get('clusteringEvaluator', CMClusteringEvaluator(self.similarityEvaluator, self.symmetryEvaluator)) class SequentialLastSnapshotClustering(Clustering): """ Assigned the last snapshot of the trajectory to a cluster. Only useful for PELE sequential runs """ def cluster(self, paths, topology=None): """ Cluster the snaptshots contained in the paths folder :param paths: List of folders with the snapshots :type paths: list :param topology: Topology file for non-pdb trajectories :type topology: str """ # Clean clusters at every step, so we only have the last snapshot of # each trajectory as clusters if topology is None: topology_contents = None else: topology_contents = utilities.getTopologyFile(topology) self.clusters = Clusters() trajectories = getAllTrajectories(paths) for trajectory in trajectories: trajNum = utilities.getTrajNum(trajectory) snapshots = utilities.getSnapshots(trajectory, True, topology=topology) if self.reportBaseFilename: reportFilename = os.path.join(os.path.split(trajectory)[0], self.reportBaseFilename % trajNum) metrics = np.loadtxt(reportFilename, ndmin=2) # Pass as cluster metrics the minimum value for each metric, # thus the metrics are not valid to do any spawning, only to # check the exit condition metrics = metrics.min(axis=0) self.addSnapshotToCluster(snapshots[-1], metrics, self.col, topology=topology_contents) else: self.addSnapshotToCluster(snapshots[-1], topology=topology_contents) def addSnapshotToCluster(self, snapshot, metrics=None, col=None, topology=None): """ Cluster a snapshot using the leader algorithm :param trajNum: Trajectory number :type trajNum: int :param snapshot: Snapshot to add :type snapshot: str :param metrics: Array with the metrics of the snapshot :type metrics: numpy.Array :param col: Column of the desired metrics :type col: int :returns: int -- Cluster to which the snapshot belongs :param topology: Topology for non-pdb trajectories :type topology: list """ if metrics is None: metrics = [] pdb = atomset.PDB() pdb.initialise(snapshot, resname=self.resname, resnum=self.resnum, chain=self.resChain, topology=topology) contacts = pdb.countContacts(self.resname, self.contactThresholdDistance, self.resnum, self.resChain) numberOfLigandAtoms = pdb.getNumberOfAtoms() contactsPerAtom = contacts/numberOfLigandAtoms cluster = Cluster(pdb, thresholdRadius=0, contacts=contactsPerAtom, metrics=metrics, metricCol=col) self.clusters.addCluster(cluster) class ClusteringBuilder: def buildClustering(self, clusteringBlock, reportBaseFilename=None, columnOfReportFile=None): """ Builder to create the appropiate clustering object :param clusteringBlock: Parameters of the clustering process :type clusteringBlock: dict :param reportBaseFilename: Name of the file that contains the metrics of the snapshots to cluster :type reportBaseFilename: str :param columnOfReportFile: Column of the report file that contain the metric of interest :type columnOfReportFile: int :returns: :py:class:`.Clustering` -- Clustering object selected """ paramsBlock = clusteringBlock[blockNames.ClusteringTypes.params] try: clusteringType = clusteringBlock[blockNames.ClusteringTypes.type] contactThresholdDistance = paramsBlock.get(blockNames.ClusteringTypes.contactThresholdDistance, 8) altSelection = paramsBlock.get(blockNames.ClusteringTypes.alternativeStructure, False) except KeyError as err: 
err.message += ": Need to provide mandatory parameter in clustering block" raise KeyError(err.message) resname = str(paramsBlock.get(blockNames.ClusteringTypes.ligandResname, "")).upper() resnum = int(paramsBlock.get(blockNames.ClusteringTypes.ligandResnum, 0)) resChain = str(paramsBlock.get(blockNames.ClusteringTypes.ligandChain, "")).upper() if clusteringType == blockNames.ClusteringTypes.rmsd: symmetries = paramsBlock.get(blockNames.ClusteringTypes.symmetries, []) thresholdCalculatorBuilder = thresholdcalculator.ThresholdCalculatorBuilder() thresholdCalculator = thresholdCalculatorBuilder.build(clusteringBlock) return ContactsClustering(thresholdCalculator, resname=resname, resnum=resnum, resChain=resChain, reportBaseFilename=reportBaseFilename, columnOfReportFile=columnOfReportFile, contactThresholdDistance=contactThresholdDistance, symmetries=symmetries, altSelection=altSelection) elif clusteringType == blockNames.ClusteringTypes.lastSnapshot: return SequentialLastSnapshotClustering(resname=resname, resnum=resnum, resChain=resChain, reportBaseFilename=reportBaseFilename, columnOfReportFile=columnOfReportFile, contactThresholdDistance=contactThresholdDistance) elif clusteringType == blockNames.ClusteringTypes.contactMap: symmetries = paramsBlock.get(blockNames.ClusteringTypes.symmetries, []) thresholdCalculatorBuilder = thresholdcalculator.ThresholdCalculatorBuilder() thresholdCalculator = thresholdCalculatorBuilder.build(clusteringBlock) try: similarityEvaluatorType = paramsBlock[blockNames.ClusteringTypes.similarityEvaluator] except KeyError: raise ValueError("No similarity Evaluator specified!!") similarityBuilder = similarityEvaluatorBuilder() similarityEvaluator = similarityBuilder.build(similarityEvaluatorType) return ContactMapAccumulativeClustering(thresholdCalculator, similarityEvaluator, resname=resname, resnum=resnum, resChain=resChain, reportBaseFilename=reportBaseFilename, columnOfReportFile=columnOfReportFile, contactThresholdDistance=contactThresholdDistance, symmetries=symmetries, altSelection=altSelection) else: sys.exit("Unknown clustering method! Choices are: " + str(clusteringTypes.CLUSTERING_TYPE_TO_STRING_DICTIONARY.values())) class similarityEvaluatorBuilder: def build(self, similarityEvaluatorType): """ Builder to create the appropiate similarityEvaluator :param similarityEvaluatorType: Type of similarityEvaluator chosen :type similarityEvaluatorType: str :returns: :py:class:`.SimilarityEvaluator` -- SimilarityEvaluator object selected """ if similarityEvaluatorType in clusteringTypes.SIMILARITY_TYPES_NAMES: return CMSimilarityEvaluator(similarityEvaluatorType) else: sys.exit("Unknown threshold calculator type! 
Choices are: " + str(clusteringTypes.SIMILARITY_TYPES_TO_STRING_DICTIONARY.values())) class CMSimilarityEvaluator: """ Evaluate the similarity of two contactMaps by calculating the ratio of the number of differences over the average of elements in the contacts maps, their correlation or their Jaccard index, that is, the ratio between the intersection of the two contact maps and their union """ def __init__(self, typeEvaluator): self.typeEvaluator = typeEvaluator def isSimilarCluster(self, contactMap, clusterContactMap, symContactMapEvaluator): """ Evaluate if two contactMaps are similar or not, return True if yes, False otherwise :param contactMap: contactMap of the structure to compare :type contactMap: numpy.Array :param contactMap: contactMap of the structure to compare :type contactMap: numpy.Array :param symContactMapEvaluator: Contact Map symmetry evaluator object :type symContactMapEvaluator: :py:class:`.SymmetryContactMapEvaluator` :returns: float -- distance between contact maps """ if self.typeEvaluator == blockNames.ClusteringTypes.correlation: return symContactMapEvaluator.evaluateCorrelation(contactMap, clusterContactMap) elif self.typeEvaluator == blockNames.ClusteringTypes.Jaccard: return symContactMapEvaluator.evaluateJaccard(contactMap, clusterContactMap) elif self.typeEvaluator == blockNames.ClusteringTypes.differenceDistance: return symContactMapEvaluator.evaluateDifferenceDistance(contactMap, clusterContactMap) else: raise ValueError("Evaluator type %s not found!!" % self.typeEvaluator) def getAllTrajectories(paths): """ Find all the trajectory files in the paths specified :param paths: The path where to find the trajectories :type paths: str :returns: list -- A list with the names of all th trajectories in paths """ files = [] for path in paths: files += glob.glob(path) # sort the files obtained by glob by name, so that the results will be the # same on all computers return sorted(files)
PypiClean
/Ax_Metrics-0.9.2.tar.gz/Ax_Metrics-0.9.2/py/axonchisel/metrics/io/erout/plugins/ero_geckoboard/numsec.py
from .base import EROut_geckoboard import logging log = logging.getLogger(__name__) # ---------------------------------------------------------------------------- class EROut_geckoboard_numsec(EROut_geckoboard): """ EROut (Extensible Report Outputter) Plugin for Geckoboard number+secondary basic usage type and superclass for advanced number+secondary EROuts. Adds JSON-serializable output to extinfo['jout'] dict. Typical usage is with collapsed query and default 'LAST' reduce function. Non-collapsed queries with other reduce functions may be used too. QFormat support (under 'geckoboard_numsec' or '_default'): reduce : (optional) Function from metricdef.FUNCS to reduce series with. Default 'LAST'. title : (optional) Text field, shown only if no comparison prefix : (Optional) prefix for value, e.g. "$" More info: - https://developer.geckoboard.com/#number-and-secondary-stat Example JSON: { "item": [ { "text": "Revenue yesterday", "value": 123, "prefix": "$" } ] } """ # # Abstract Method Implementations # # abstract def plugin_output(self, mdseries, query=None): """ EROut plugins must implement this abstract method. Invoked to output MultiDataSeries as specified. Returns nothing. Output target should be configured separately. """ log.debug("Outputting %s for query %s", mdseries, query) self._qfdomain = 'geckoboard_numsec' # Write options: self._write_options() # Write item from primary: reduce_func = self._qformat_get('reduce', 'LAST') self._write_primary(reduce_func=reduce_func) # # Protected Methods for Subclasses # def _write_options(self): """ Write options to jout. Note that we support options here useful to our subclasses but not necessarily to this class itself. """ if self.query: if self.query.qdata.get_qmetric(0).impact == 'NEGATIVE': self.jout['reverse'] = True if self._qformat_get('absolute', False): self.jout['absolute'] = True try: qformat = self.query.qformat self.jout['prefix'] = qformat.get(self._qfdomain, 'prefix') except KeyError: pass def _write_primary(self, reduce_func): """ Write the primary value to jout. Uses last point of first DataSeries. """ # Get primary value from first DataSeries: dseries = self.mdseries.get_series(0) value = dseries.reduce(reduce_func) # Add value: item = { 'value': value } item['text'] = self._qformat_get('title', "") self.jout['item'].append(item) # ---------------------------------------------------------------------------- class EROut_geckoboard_numsec_comp(EROut_geckoboard_numsec): """ EROut (Extensible Report Outputter) Plugin for Geckoboard number+secondary stat with comparison. Adds JSON-serializable output to extinfo['jout'] dict. Two values are attempted, first the primary value, then the comparison. Item values are obtained first from first DataSeries (reduced), then from the first Ghost DataSeries (reduced). Geckoboard only wants to see 1 or 2 values, so either invoke with a single query with ghosts, or invoke with 2 queries without ghosts. Once 2 values are obtained, additional values are ignored. Typical usage is with collapsed query and default 'LAST' reduce function. Non-collapsed queries with other reduce functions may be used too. QFormat support (under 'geckoboard_numsec_comp' or '_default'): reduce : (optional) Function from metricdef.FUNCS to reduce series with. Default 'LAST'. title : (optional) Text field, shown only if no comparison absolute : (optional) true for absolute comparison prefix : (Optional) prefix for value, e.g. 
"$" More info: - https://developer.geckoboard.com/#number-and-secondary-stat - https://developer.geckoboard.com/#comparison-example Example JSON: { "item": [ { "value": 142 }, { "value": 200 } ] } """ # # Abstract Method Implementations # # abstract def plugin_output(self, mdseries, query=None): """ EROut plugins must implement this abstract method. Invoked to output MultiDataSeries as specified. Returns nothing. Output target should be configured separately. """ log.debug("Outputting %s for query %s", mdseries, query) self._qfdomain = 'geckoboard_numsec_comp' # Write options: self._write_options() # Write item from primary and ghost (as long as we're not maxed): if self._has_max_items(): return reduce_func = self._qformat_get('reduce', 'LAST') self._write_primary(reduce_func=reduce_func) if self._has_max_items(): return self._write_comparison() # # Internal Methods # def _has_max_items(self): """Return bool indicating if we've accumulated max (2) item values.""" return len(self.jout['item']) >= 2 def _write_comparison(self): """ Write the comparison value to jout. Reduces first Ghost DataSeries. If no ghosts found, no comparison value is written at this point, but if subsequent query outputs are fed through this same EROut instance, the next one may provide comparison via its actual data. """ # Find first ghost series: try: ds_ghost = next(self.mdseries.iter_ghost_series()) except StopIteration: # No additional (ghost) series, so leave it at that. return # Reduce ghost series: reduce_func = self._qformat_get('reduce', 'LAST') value = ds_ghost.reduce(reduce_func) # Add value: item = { 'value': value } self.jout['item'].append(item) # ---------------------------------------------------------------------------- class EROut_geckoboard_numsec_trend(EROut_geckoboard_numsec): """ EROut (Extensible Report Outputter) Plugin for Geckoboard number+secondary stat with trendline. Adds JSON-serializable output to extinfo['jout'] dict. Item value is obtained first first DataSeries, reduced. Trend values are obtained from entire first DataSeries. Only first DataSeries is used. The default reduce function is SUM, which presents the primary stat as the sum of the trend points. For some situations this may not be appropriate, and LAST or other reduce funcs may be provided. Unlike most other numsec EROuts, collapse mode should not be enabled, since we actually care about the data points here. Ghosts however should typically be disabled, since they are ignored. QFormat support (under 'geckoboard_numsec_trend' or '_default'): reduce : (optional) Function from metricdef.FUNCS to reduce series with. Default 'SUM'. title : (optional) Text field, shown only if no comparison prefix : (optional) prefix for value, e.g. "$" More info: - https://developer.geckoboard.com/#number-and-secondary-stat - https://developer.geckoboard.com/#trendline-example Example JSON: { "item": [ { "text": "Past 7 days", "value": "274057" }, [ "38594", "39957", "35316", "35913", "36668", "45660", "41949" ] ] } """ # # Abstract Method Implementations # # abstract def plugin_output(self, mdseries, query=None): """ EROut plugins must implement this abstract method. Invoked to output MultiDataSeries as specified. Returns nothing. Output target should be configured separately. 
""" log.debug("Outputting %s for query %s", mdseries, query) self._qfdomain = 'geckoboard_numsec_trend' # Write options: self._write_options() # Write item from primary: reduce_func = 'SUM' if self.query: reduce_func = self._qformat_get('reduce', reduce_func) self._write_primary(reduce_func=reduce_func) # Write trend values: self._write_trend() # # Internal Methods # def _write_trend(self): """ Write the trend values to jout. Uses all the data points of the first DataSeries. """ # Get all values from first DataSeries: values = [dp.value for dp in self.mdseries.get_series(0).iter_points()] # Add values: self.jout['item'].append(values) # ----------------------------------------------------------------------------
PypiClean
/Loterias-0.0.11.tar.gz/Loterias-0.0.11/README.md
# Python library to extract lottery game data via threads

Basic syntax:

    from Loterias import CollectionType, CollectionByType

Where:

- `CollectionType`: holds the basic identifiers for the data collections
- `CollectionByType(typed=CollectionType.DiaDeSorte, maxThreads=8, concursoStart=1, verbose=False, autoStart=False)`: instantiates the class used to fetch data
  - `typed`: game type, defined by `CollectionType`
  - `maxThreads`: maximum number of threads, default 8 [for multiple searches]
  - `concursoStart`: number of the first draw to be searched, default 1
  - `verbose`: whether or not to print some information during the search
  - `autoStart`: start the search immediately

Elements/routines of `CollectionByType`:

- `maxThreads`: sets the maximum number of threads [for multiple searches]
- `sorteioInicial`: sets the initial draw number
- `sorteioFinal`: limits the final draw number [default is the latest draw]
- `verbose`: whether or not to print some information during the search
- `value`: starts the data fetch, if needed, and returns the result as a `pandas.DataFrame`

v 0.0.5: added methods

- `getDezenas`: extracts the list of drawn numbers
- `getSomas`: extracts the list of sums, one per draw
- `getContagem`: extracts the individual count for each draw
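A minimal usage sketch, assuming the interface documented above (`sorteioFinal` set as an attribute and `value` returning a `pandas.DataFrame`; the draw limit of 50 is arbitrary):

    from Loterias import CollectionType, CollectionByType

    # Collect "Dia de Sorte" results starting from draw 1, using up to 4 threads
    col = CollectionByType(typed=CollectionType.DiaDeSorte, maxThreads=4,
                           concursoStart=1, verbose=True, autoStart=False)
    col.sorteioFinal = 50   # limit the search to the first 50 draws
    df = col.value          # triggers the fetch and returns a pandas.DataFrame
    print(df.head())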
PypiClean
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/hris/model/available_actions.py
import re # noqa: F401 import sys # noqa: F401 from typing import ( Optional, Union, List, Dict, ) from MergePythonSDK.shared.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, OpenApiModel, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from MergePythonSDK.shared.exceptions import ApiAttributeError from MergePythonSDK.shared.model_utils import import_model_by_name def lazy_import(): from MergePythonSDK.hris.model.account_integration import AccountIntegration from MergePythonSDK.hris.model.model_operation import ModelOperation globals()['AccountIntegration'] = AccountIntegration globals()['ModelOperation'] = ModelOperation class AvailableActions(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ return (bool, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() defined_types = { 'integration': (AccountIntegration,), # noqa: E501 'passthrough_available': (bool, none_type,), # noqa: E501 'available_model_operations': ([ModelOperation], none_type,), # noqa: E501 } return defined_types @cached_property def discriminator(): return None attribute_map = { 'integration': 'integration', # noqa: E501 'passthrough_available': 'passthrough_available', # noqa: E501 'available_model_operations': 'available_model_operations', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, integration, passthrough_available, *args, **kwargs): # noqa: E501 """AvailableActions - a model defined in OpenAPI Args: integration (AccountIntegration): passthrough_available (bool): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) available_model_operations ([ModelOperation]): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', True) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.integration = integration self.passthrough_available = passthrough_available self.available_model_operations = kwargs.get("available_model_operations", None) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, integration, passthrough_available, *args, **kwargs): # noqa: E501 """AvailableActions - a model defined in OpenAPI Args: integration (AccountIntegration): passthrough_available (bool): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
_visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) available_model_operations ([ModelOperation]): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.integration: Union["AccountIntegration"] = integration self.passthrough_available: Union[bool] = passthrough_available self.available_model_operations: Union[List["ModelOperation"]] = kwargs.get("available_model_operations", None)
PypiClean
/MITuna-1.0.tar.gz/MITuna-1.0/tuna/connection.py
"""Connection class represents a DB connection. Used by machine to establish new DB connections""" import socket from random import randrange from subprocess import Popen, PIPE, STDOUT from time import sleep from io import StringIO import paramiko from tuna.utils.logger import setup_logger from tuna.abort import chk_abort_file NUM_SSH_RETRIES = 40 NUM_CMD_RETRIES = 30 SSH_TIMEOUT = 60.0 # in seconds class Connection(): """Connection class defined an ssh or ftp client connection. Instantiated by the machine class""" #pylint: disable=no-member def __init__(self, **kwargs): #pylint self.logger = None self.local_machine = False self.subp = None # Holds the subprocess obj for local_machine self.out_channel = None # Holds the out channel for remote connection allowed_keys = set([ 'id', 'hostname', 'user', 'password', 'port', 'local_machine', 'chk_abort_file', 'logger' ]) self.__dict__.update((key, None) for key in allowed_keys) self.__dict__.update( (key, value) for key, value in kwargs.items() if key in allowed_keys) self.ssh = None if self.logger is None: self.logger = setup_logger('Connection') self.inst_bins = {'which': True, 'cd': True} self.connect(self.chk_abort_file) def check_binary(self, bin_str): """Checking existence of binary""" if bin_str in self.inst_bins: return self.inst_bins[bin_str] cmd = f"which {bin_str}" _, out, _ = self.exec_command(cmd) if not out: return False ret = False for line in out: if bin_str in line: ret = True break self.inst_bins[bin_str] = ret return ret @staticmethod def get_bin_str(cmd): """Helper function""" bin_str = '' line = cmd[:] args = line.split(' ') for arg in args: arg = arg.strip() if not arg: # skip empty tokens due to white spaces continue if '=' in arg: continue if arg in ('export', 'sudo', '&&'): continue bin_str = arg break return bin_str def test_cmd_str(self, cmd): """Function to look for installed binary""" split_cmd = cmd[:] split_cmd = split_cmd.replace('|', ';') split_cmd = split_cmd.split(';') for sub_cmd in split_cmd: sub_cmd = sub_cmd.strip() bin_str = self.get_bin_str(sub_cmd) installed = self.check_binary(bin_str) if not installed: self.logger.error('Tuna cannot find binary: %s', bin_str) return False return True def is_alive(self): ''' Check if the launched process is running ''' if self.local_machine: # pylint: disable=no-else-return if not self.subp: self.logger.error('Checking isAlive when process was not created') return self.subp.poll() is None else: if not self.out_channel: self.logger.error('Checking isAlive when channel does not exist') return not self.out_channel.exit_status_ready() def exec_command_unparsed(self, cmd, timeout=SSH_TIMEOUT, abort=None): """Function to exec commands warning: leaky! client code responsible for closing the resources! 
""" # pylint: disable=broad-except if not self.test_cmd_str(cmd): raise ValueError(f'Machine {self.id} failed, missing binary: {cmd}') if self.local_machine: #universal_newlines corrects output format to utf-8 # pylint: disable=consider-using-with ; see exec_command_unparsed's docstring self.subp = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True, close_fds=True, universal_newlines=True) stdout, stderr = self.subp.stdout, self.subp.stderr return 0, stdout, stderr for cmd_idx in range(NUM_CMD_RETRIES): try: if self.ssh is None or not self.ssh.get_transport( ) or not self.ssh.get_transport().is_active(): self.ssh_connect() i_var, o_var, e_var = self.ssh.exec_command(cmd, timeout=timeout) except Exception as exc: self.logger.warning('Machine %s failed to execute command: %s', self.id, cmd) self.logger.warning('Exception occurred %s', exc) self.logger.warning('Retrying ... %s', cmd_idx) retry_interval = randrange(SSH_TIMEOUT) self.logger.warning('sleeping for %s seconds', retry_interval) sleep(retry_interval) else: self.out_channel = o_var.channel return i_var, o_var, e_var if abort is not None and chk_abort_file(self.id, self.logger): self.logger.warning('Machine %s aborted command execution: %s', self.id, cmd) return None, None, None self.logger.error('cmd_exec retries exhausted, giving up') return None, None, None def exec_command(self, cmd, timeout=SSH_TIMEOUT, abort=None, proc_line=None): # pylint: disable=too-many-nested-blocks, too-many-branches """Function to exec commands""" _, o_var, e_var = self.exec_command_unparsed(cmd, timeout, abort) try: if not o_var: return 1, o_var, e_var if not proc_line: # pylint: disable-next=unnecessary-lambda-assignment ; more readable proc_line = lambda x: self.logger.info(line.strip()) ret_out = StringIO() ret_err = e_var try: while True: line = o_var.readline() if line == '' and not self.is_alive(): break if line: proc_line(line) ret_out.write(line) ret_out.seek(0) if self.local_machine: ret_code = self.subp.returncode if ret_out: ret_out.seek(0) ret_err = StringIO() err_str = ret_out.readlines() for line in err_str[-5:]: ret_err.write(line) ret_err.seek(0) ret_out.seek(0) else: ret_code = self.out_channel.recv_exit_status() except (socket.timeout, socket.error) as exc: self.logger.warning('Exception occurred %s', exc) ret_code = 1 ret_out = None return ret_code, ret_out, ret_err finally: if o_var and hasattr(o_var, "close"): o_var.close() if e_var and hasattr(e_var, "close"): e_var.close() def connect(self, abort=None): """Establishing new connecion""" if not self.local_machine: self.ssh_connect(abort) def ssh_connect(self, abort=None): """Establishing ssh connection""" ret = True if self.ssh is not None and self.ssh.get_transport( ) and self.ssh.get_transport().is_active(): ret = False return ret self.ssh = paramiko.SSHClient() self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) for ssh_idx in range(NUM_SSH_RETRIES): if abort is not None and chk_abort_file(self.id, self.logger): self.logger.warning('Machine %s aborted ssh connection', self.id) return False try: self.ssh.connect(self.hostname, username=self.user, password=self.password, port=self.port, timeout=SSH_TIMEOUT, allow_agent=False) except paramiko.ssh_exception.BadHostKeyException: self.ssh = None self.logger.error('Bad host exception which connecting to host: %s', self.hostname) except (paramiko.ssh_exception.SSHException, socket.error): retry_interval = randrange(SSH_TIMEOUT) self.logger.warning( 'Attempt %s to connect to machine %s (%s p%s) via ssh failed, sleeping for %s 
seconds', ssh_idx, self.id, self.hostname, self.port, retry_interval) sleep(retry_interval) else: self.logger.info( 'SSH connection successfully established to machine %s', self.id) return True self.logger.error('SSH retries exhausted machine: %s', self.hostname) return False def open_sftp(self): """Helper function for ftp client""" ftp_client = None if self.ssh and not self.local_machine: ftp_client = self.ssh.open_sftp() return ftp_client
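# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): running a command on
# the local machine through Connection. The id value is arbitrary; for a
# remote machine you would pass hostname/user/password/port instead of
# local_machine=True.
if __name__ == '__main__':
    conn = Connection(id=0, local_machine=True)
    ret_code, out, err = conn.exec_command('echo hello from tuna')
    if ret_code == 0 and out is not None:
        print(out.read())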
PypiClean
/JitViewer-0.2.1.tar.gz/JitViewer-0.2.1/_jitviewer/static/canjs/1.1.4/can.jquery-all.js
module = { _orig: window.module, _define: window.define }; module['jquery'] = jQuery; module['jquery/jquery.js'] = jQuery; define = function (id, deps, value) { module[id] = value(); }; define.amd = { jQuery: true }; // ## can/util/can.js module['can/util/can.js'] = (function () { var can = window.can || {}; if (typeof GLOBALCAN === 'undefined' || GLOBALCAN !== false) { window.can = can; } can.isDeferred = function (obj) { var isFunction = this.isFunction; // Returns `true` if something looks like a deferred. return obj && isFunction(obj.then) && isFunction(obj.pipe); }; var cid = 0; can.cid = function (object, name) { if (object._cid) { return object._cid } else { return object._cid = (name || "") + (++cid) } } return can; })(); // ## can/util/array/each.js module['can/util/array/each.js'] = (function (can) { can.each = function (elements, callback, context) { var i = 0, key; if (elements) { if (typeof elements.length === 'number' && elements.pop) { if (elements.attr) { elements.attr('length'); } for (key = elements.length; i < key; i++) { if (callback.call(context || elements[i], elements[i], i, elements) === false) { break; } } } else if (elements.hasOwnProperty) { for (key in elements) { if (elements.hasOwnProperty(key)) { if (callback.call(context || elements[key], elements[key], key, elements) === false) { break; } } } } } return elements; }; return can; })(module["can/util/can.js"]); // ## can/util/jquery/jquery.js module['can/util/jquery/jquery.js'] = (function ($, can) { // _jQuery node list._ $.extend(can, $, { trigger: function (obj, event, args) { if (obj.trigger) { obj.trigger(event, args); } else { $.event.trigger(event, args, obj, true); } }, addEvent: function (ev, cb) { $([this]).bind(ev, cb); return this; }, removeEvent: function (ev, cb) { $([this]).unbind(ev, cb); return this; }, // jquery caches fragments, we always needs a new one buildFragment: function (elems, context) { var oldFragment = $.buildFragment, ret; elems = [elems]; // Set context per 1.8 logic context = context || document; context = !context.nodeType && context[0] || context; context = context.ownerDocument || context; ret = oldFragment.call(jQuery, elems, context); return ret.cacheable ? $.clone(ret.fragment) : ret.fragment || ret; }, $: $, each: can.each }); // Wrap binding functions. $.each(['bind', 'unbind', 'undelegate', 'delegate'], function (i, func) { can[func] = function () { var t = this[func] ? this : $([this]); t[func].apply(t, arguments); return this; }; }); // Wrap modifier functions. $.each(["append", "filter", "addClass", "remove", "data", "get"], function (i, name) { can[name] = function (wrapped) { return wrapped[name].apply(wrapped, can.makeArray(arguments).slice(1)); }; }); // Memory safe destruction. var oldClean = $.cleanData; $.cleanData = function (elems) { $.each(elems, function (i, elem) { if (elem) { can.trigger(elem, "destroyed", [], false); } }); oldClean(elems); }; return can; })(module["jquery/jquery.js"], module["can/util/can.js"], module["can/util/array/each.js"]); // ## can/util/string/string.js module['can/util/string/string.js'] = (function (can) { // ##string.js // _Miscellaneous string utility functions._ // Several of the methods in this plugin use code adapated from Prototype // Prototype JavaScript framework, version 1.6.0.1. 
// © 2005-2007 Sam Stephenson var strUndHash = /_|-/, strColons = /\=\=/, strWords = /([A-Z]+)([A-Z][a-z])/g, strLowUp = /([a-z\d])([A-Z])/g, strDash = /([a-z\d])([A-Z])/g, strReplacer = /\{([^\}]+)\}/g, strQuote = /"/g, strSingleQuote = /'/g, // Returns the `prop` property from `obj`. // If `add` is true and `prop` doesn't exist in `obj`, create it as an // empty object. getNext = function (obj, prop, add) { return prop in obj ? obj[prop] : (add && (obj[prop] = {})); }, // Returns `true` if the object can have properties (no `null`s). isContainer = function (current) { return (/^f|^o/).test(typeof current); }; can.extend(can, { // Escapes strings for HTML. esc: function (content) { // Convert bad values into empty strings var isInvalid = content === null || content === undefined || (isNaN(content) && ("" + content === 'NaN')); return ("" + (isInvalid ? '' : content)).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(strQuote, '&#34;').replace(strSingleQuote, "&#39;"); }, getObject: function (name, roots, add) { // The parts of the name we are looking up // `['App','Models','Recipe']` var parts = name ? name.split('.') : [], length = parts.length, current, r = 0, ret, i; // Make sure roots is an `array`. roots = can.isArray(roots) ? roots : [roots || window]; if (!length) { return roots[0]; } // For each root, mark it as current. while (roots[r]) { current = roots[r]; // Walk current to the 2nd to last object or until there // is not a container. for (i = 0; i < length - 1 && isContainer(current); i++) { current = getNext(current, parts[i], add); } // If we can get a property from the 2nd to last object... if (isContainer(current)) { // Get (and possibly set) the property. ret = getNext(current, parts[i], add); // If there is a value, we exit. if (ret !== undefined) { // If `add` is `false`, delete the property if (add === false) { delete current[parts[i]]; } return ret; } } r++; } }, // Capitalizes a string. capitalize: function (s, cache) { // Used to make newId. return s.charAt(0).toUpperCase() + s.slice(1); }, // Underscores a string. underscore: function (s) { return s.replace(strColons, '/').replace(strWords, '$1_$2').replace(strLowUp, '$1_$2').replace(strDash, '_').toLowerCase(); }, // Micro-templating. sub: function (str, data, remove) { var obs = []; obs.push(str.replace(strReplacer, function (whole, inside) { // Convert inside to type. var ob = can.getObject(inside, data, remove === undefined ? remove : !remove); if (ob === undefined) { obs = null; return ""; } // If a container, push into objs (which will return objects found). if (isContainer(ob) && obs) { obs.push(ob); return ""; } return "" + ob; })); return obs === null ? obs : (obs.length <= 1 ? obs[0] : obs); }, // These regex's are used throughout the rest of can, so let's make // them available. replacer: strReplacer, undHash: strUndHash }); return can; })(module["can/util/jquery/jquery.js"]); // ## can/construct/construct.js module['can/construct/construct.js'] = (function (can) { // ## construct.js // `can.Construct` // _This is a modified version of // [John Resig's class](http://ejohn.org/blog/simple-javascript-inheritance/). // It provides class level inheritance and callbacks._ // A private flag used to initialize a new class instance without // initializing it's bindings. 
var initializing = 0; can.Construct = function () { if (arguments.length) { return can.Construct.extend.apply(can.Construct, arguments); } }; can.extend(can.Construct, { newInstance: function () { // Get a raw instance object (`init` is not called). var inst = this.instance(), arg = arguments, args; // Call `setup` if there is a `setup` if (inst.setup) { args = inst.setup.apply(inst, arguments); } // Call `init` if there is an `init` // If `setup` returned `args`, use those as the arguments if (inst.init) { inst.init.apply(inst, args || arguments); } return inst; }, // Overwrites an object with methods. Used in the `super` plugin. // `newProps` - New properties to add. // `oldProps` - Where the old properties might be (used with `super`). // `addTo` - What we are adding to. _inherit: function (newProps, oldProps, addTo) { can.extend(addTo || newProps, newProps || {}) }, // used for overwriting a single property. // this should be used for patching other objects // the super plugin overwrites this _overwrite: function (what, oldProps, propName, val) { what[propName] = val; }, // Set `defaults` as the merger of the parent `defaults` and this // object's `defaults`. If you overwrite this method, make sure to // include option merging logic. setup: function (base, fullName) { this.defaults = can.extend(true, {}, base.defaults, this.defaults); }, // Create's a new `class` instance without initializing by setting the // `initializing` flag. instance: function () { // Prevents running `init`. initializing = 1; var inst = new this(); // Allow running `init`. initializing = 0; return inst; }, // Extends classes. extend: function (fullName, klass, proto) { // Figure out what was passed and normalize it. if (typeof fullName != 'string') { proto = klass; klass = fullName; fullName = null; } if (!proto) { proto = klass; klass = null; } proto = proto || {}; var _super_class = this, _super = this.prototype, name, shortName, namespace, prototype; // Instantiate a base class (but only create the instance, // don't run the init constructor). prototype = this.instance(); // Copy the properties over onto the new prototype. can.Construct._inherit(proto, _super, prototype); // The dummy class constructor. function Constructor() { // All construction is actually done in the init method. if (!initializing) { return this.constructor !== Constructor && arguments.length ? // We are being called without `new` or we are extending. arguments.callee.extend.apply(arguments.callee, arguments) : // We are being called with `new`. this.constructor.newInstance.apply(this.constructor, arguments); } } // Copy old stuff onto class (can probably be merged w/ inherit) for (name in _super_class) { if (_super_class.hasOwnProperty(name)) { Constructor[name] = _super_class[name]; } } // Copy new static properties on class. can.Construct._inherit(klass, _super_class, Constructor); // Setup namespaces. if (fullName) { var parts = fullName.split('.'), shortName = parts.pop(), current = can.getObject(parts.join('.'), window, true), namespace = current, _fullName = can.underscore(fullName.replace(/\./g, "_")), _shortName = can.underscore(shortName); current[shortName] = Constructor; } // Set things that shouldn't be overwritten. can.extend(Constructor, { constructor: Constructor, prototype: prototype, namespace: namespace, shortName: shortName, _shortName: _shortName, fullName: fullName, _fullName: _fullName }); // Make sure our prototype looks nice. 
Constructor.prototype.constructor = Constructor; // Call the class `setup` and `init` var t = [_super_class].concat(can.makeArray(arguments)), args = Constructor.setup.apply(Constructor, t); if (Constructor.init) { Constructor.init.apply(Constructor, args || t); } return Constructor; } }); return can.Construct; })(module["can/util/string/string.js"]); // ## can/construct/proxy/proxy.js module['can/construct/proxy/proxy.js'] = (function (can, Construct) { var isFunction = can.isFunction, isArray = can.isArray, makeArray = can.makeArray, proxy = function (funcs) { //args that should be curried var args = makeArray(arguments), self; // get the functions to callback funcs = args.shift(); // if there is only one function, make funcs into an array if (!isArray(funcs)) { funcs = [funcs]; } // keep a reference to us in self self = this; return function class_cb() { // add the arguments after the curried args var cur = args.concat(makeArray(arguments)), isString, length = funcs.length, f = 0, func; // go through each function to call back for (; f < length; f++) { func = funcs[f]; if (!func) { continue; } // set called with the name of the function on self (this is how this.view works) isString = typeof func == "string"; // call the function cur = (isString ? self[func] : func).apply(self, cur || []); // pass the result to the next function (if there is a next function) if (f < length - 1) { cur = !isArray(cur) || cur._use_call ? [cur] : cur } } return cur; } } can.Construct.proxy = can.Construct.prototype.proxy = proxy; // this corrects the case where can/control loads after can/construct/proxy, so static props don't have proxy var correctedClasses = [can.Observe, can.Control, can.Model], i = 0; for (; i < correctedClasses.length; i++) { if (correctedClasses[i]) { correctedClasses[i].proxy = proxy; } } return can; })(module["can/util/jquery/jquery.js"], module["can/construct/construct.js"]); // ## can/construct/super/super.js module['can/construct/super/super.js'] = (function (can, Construct) { // tests if we can get super in .toString() var isFunction = can.isFunction, fnTest = /xyz/.test(function () { xyz; }) ? /\b_super\b/ : /.*/; // overwrites a single property so it can still call super can.Construct._overwrite = function (addTo, base, name, val) { // Check if we're overwriting an existing function addTo[name] = isFunction(val) && isFunction(base[name]) && fnTest.test(val) ? (function (name, fn) { return function () { var tmp = this._super, ret; // Add a new ._super() method that is the same method // but on the super-class this._super = base[name]; // The method only need to be bound temporarily, so we // remove it when we're done executing ret = fn.apply(this, arguments); this._super = tmp; return ret; }; })(name, val) : val; } // overwrites an object with methods, sets up _super // newProps - new properties // oldProps - where the old properties might be // addTo - what we are adding to can.Construct._inherit = function (newProps, oldProps, addTo) { addTo = addTo || newProps for (var name in newProps) { can.Construct._overwrite(addTo, oldProps, name, newProps[name]); } } return can; })(module["can/util/jquery/jquery.js"], module["can/construct/construct.js"]); // ## can/control/control.js module['can/control/control.js'] = (function (can) { // ## control.js // `can.Control` // _Controller_ // Binds an element, returns a function that unbinds. 
var bind = function (el, ev, callback) { can.bind.call(el, ev, callback); return function () { can.unbind.call(el, ev, callback); }; }, isFunction = can.isFunction, extend = can.extend, each = can.each, slice = [].slice, paramReplacer = /\{([^\}]+)\}/g, special = can.getObject("$.event.special", [can]) || {}, // Binds an element, returns a function that unbinds. delegate = function (el, selector, ev, callback) { can.delegate.call(el, selector, ev, callback); return function () { can.undelegate.call(el, selector, ev, callback); }; }, // Calls bind or unbind depending if there is a selector. binder = function (el, ev, callback, selector) { return selector ? delegate(el, can.trim(selector), ev, callback) : bind(el, ev, callback); }, basicProcessor; var Control = can.Control = can.Construct( { // Setup pre-processes which methods are event listeners. setup: function () { // Allow contollers to inherit "defaults" from super-classes as it // done in `can.Construct` can.Construct.setup.apply(this, arguments); // If you didn't provide a name, or are `control`, don't do anything. if (can.Control) { // Cache the underscored names. var control = this, funcName; // Calculate and cache actions. control.actions = {}; for (funcName in control.prototype) { if (control._isAction(funcName)) { control.actions[funcName] = control._action(funcName); } } } }, // Moves `this` to the first argument, wraps it with `jQuery` if it's an element _shifter: function (context, name) { var method = typeof name == "string" ? context[name] : name; if (!isFunction(method)) { method = context[method]; } return function () { context.called = name; return method.apply(context, [this.nodeName ? can.$(this) : this].concat(slice.call(arguments, 0))); }; }, // Return `true` if is an action. _isAction: function (methodName) { var val = this.prototype[methodName], type = typeof val; // if not the constructor return (methodName !== 'constructor') && // and is a function or links to a function (type == "function" || (type == "string" && isFunction(this.prototype[val]))) && // and is in special, a processor, or has a funny character !! (special[methodName] || processors[methodName] || /[^\w]/.test(methodName)); }, // Takes a method name and the options passed to a control // and tries to return the data necessary to pass to a processor // (something that binds things). _action: function (methodName, options) { // If we don't have options (a `control` instance), we'll run this // later. paramReplacer.lastIndex = 0; if (options || !paramReplacer.test(methodName)) { // If we have options, run sub to replace templates `{}` with a // value from the options or the window var convertedName = options ? can.sub(methodName, [options, window]) : methodName; if (!convertedName) { return null; } // If a `{}` template resolves to an object, `convertedName` will be // an array var arr = can.isArray(convertedName), // Get the name name = arr ? convertedName[1] : convertedName, // Grab the event off the end parts = name.split(/\s+/g), event = parts.pop(); return { processor: processors[event] || basicProcessor, parts: [name, parts.join(" "), event], delegate: arr ? convertedName[0] : undefined }; } }, // An object of `{eventName : function}` pairs that Control uses to // hook up events auto-magically. processors: {}, // A object of name-value pairs that act as default values for a // control instance defaults: {} }, { // Sets `this.element`, saves the control in `data, binds event // handlers. 
setup: function (element, options) { var cls = this.constructor, pluginname = cls.pluginName || cls._fullName, arr; // Want the raw element here. this.element = can.$(element) if (pluginname && pluginname !== 'can_control') { // Set element and `className` on element. this.element.addClass(pluginname); } (arr = can.data(this.element, "controls")) || can.data(this.element, "controls", arr = []); arr.push(this); // Option merging. this.options = extend({}, cls.defaults, options); // Bind all event handlers. this.on(); // Get's passed into `init`. return [this.element, this.options]; }, on: function (el, selector, eventName, func) { if (!el) { // Adds bindings. this.off(); // Go through the cached list of actions and use the processor // to bind var cls = this.constructor, bindings = this._bindings, actions = cls.actions, element = this.element, destroyCB = can.Control._shifter(this, "destroy"), funcName, ready; for (funcName in actions) { // Only push if we have the action and no option is `undefined` if (actions.hasOwnProperty(funcName) && (ready = actions[funcName] || cls._action(funcName, this.options))) { bindings.push(ready.processor(ready.delegate || element, ready.parts[2], ready.parts[1], funcName, this)); } } // Setup to be destroyed... // don't bind because we don't want to remove it. can.bind.call(element, "destroyed", destroyCB); bindings.push(function (el) { can.unbind.call(el, "destroyed", destroyCB); }); return bindings.length; } if (typeof el == 'string') { func = eventName; eventName = selector; selector = el; el = this.element; } if (func === undefined) { func = eventName; eventName = selector; selector = null; } if (typeof func == 'string') { func = can.Control._shifter(this, func); } this._bindings.push(binder(el, eventName, func, selector)); return this._bindings.length; }, // Unbinds all event handlers on the controller. off: function () { var el = this.element[0] each(this._bindings || [], function (value) { value(el); }); // Adds bindings. this._bindings = []; }, // Prepares a `control` for garbage collection destroy: function () { var Class = this.constructor, pluginName = Class.pluginName || Class._fullName, controls; // Unbind bindings. this.off(); if (pluginName && pluginName !== 'can_control') { // Remove the `className`. this.element.removeClass(pluginName); } // Remove from `data`. controls = can.data(this.element, "controls"); controls.splice(can.inArray(this, controls), 1); can.trigger(this, "destroyed"); // In case we want to know if the `control` is removed. this.element = null; } }); var processors = can.Control.processors, // Processors do the binding. // They return a function that unbinds when called. // The basic processor that binds events. basicProcessor = function (el, event, selector, methodName, control) { return binder(el, event, can.Control._shifter(control, methodName), selector); }; // Set common events to be processed as a `basicProcessor` each(["change", "click", "contextmenu", "dblclick", "keydown", "keyup", "keypress", "mousedown", "mousemove", "mouseout", "mouseover", "mouseup", "reset", "resize", "scroll", "select", "submit", "focusin", "focusout", "mouseenter", "mouseleave", // #104 - Add touch events as default processors // TOOD feature detect? 
"touchstart", "touchmove", "touchcancel", "touchend", "touchleave"], function (v) { processors[v] = basicProcessor; }); return Control; })(module["can/util/jquery/jquery.js"], module["can/construct/construct.js"]); // ## can/control/plugin/plugin.js module['can/control/plugin/plugin.js'] = (function ($, can) { //used to determine if a control instance is one of controllers //controllers can be strings or classes var i, isAControllerOf = function (instance, controllers) { for (i = 0; i < controllers.length; i++) { if (typeof controllers[i] == 'string' ? instance.constructor._shortName == controllers[i] : instance instanceof controllers[i]) { return true; } } return false; }, makeArray = can.makeArray, old = can.Control.setup; can.Control.setup = function () { // if you didn't provide a name, or are control, don't do anything if (this !== can.Control) { var pluginName = this.pluginName || this._fullName; // create jQuery plugin if (pluginName !== 'can_control') { this.plugin(pluginName); } old.apply(this, arguments); } }; $.fn.extend({ controls: function () { var controllerNames = makeArray(arguments), instances = [], controls, c, cname; //check if arguments this.each(function () { controls = can.$(this).data("controls"); if (!controls) { return; } for (var i = 0; i < controls.length; i++) { c = controls[i]; if (!controllerNames.length || isAControllerOf(c, controllerNames)) { instances.push(c); } } }); return instances; }, control: function (control) { return this.controls.apply(this, arguments)[0]; } }); can.Control.plugin = function (pluginname) { var control = this; if (!$.fn[pluginname]) { $.fn[pluginname] = function (options) { var args = makeArray(arguments), //if the arg is a method on this control isMethod = typeof options == "string" && $.isFunction(control.prototype[options]), meth = args[0], returns; this.each(function () { //check if created var plugin = can.$(this).control(control); if (plugin) { if (isMethod) { // call a method on the control with the remaining args returns = plugin[meth].apply(plugin, args.slice(1)); } else { // call the plugin's update method plugin.update.apply(plugin, args); } } else { //create a new control instance control.newInstance.apply(control, [this].concat(args)); } }); return returns !== undefined ? returns : this; }; } } can.Control.prototype.update = function (options) { can.extend(this.options, options); this.on(); }; return can; })(module["jquery/jquery.js"], module["can/util/jquery/jquery.js"], module["can/control/control.js"]); // ## can/view/view.js module['can/view/view.js'] = (function (can) { // ## view.js // `can.view` // _Templating abstraction._ var isFunction = can.isFunction, makeArray = can.makeArray, // Used for hookup `id`s. hookupId = 1, $view = can.view = function (view, data, helpers, callback) { // If helpers is a `function`, it is actually a callback. if (isFunction(helpers)) { callback = helpers; helpers = undefined; } var pipe = function (result) { return $view.frag(result); }, // In case we got a callback, we need to convert the can.view.render // result to a document fragment wrapCallback = isFunction(callback) ? function (frag) { callback(pipe(frag)); } : null, // Get the result. result = $view.render(view, data, helpers, wrapCallback), deferred = can.Deferred(); if (isFunction(result)) { return result; } if (can.isDeferred(result)) { result.done(function (result, data) { deferred.resolve.call(deferred, pipe(result), data); }); return deferred; } // Convert it into a dom frag. 
return pipe(result); }; can.extend($view, { // creates a frag and hooks it up all at once frag: function (result, parentNode) { return $view.hookup($view.fragment(result), parentNode); }, // simply creates a frag // this is used internally to create a frag // insert it // then hook it up fragment: function (result) { var frag = can.buildFragment(result, document.body); // If we have an empty frag... if (!frag.childNodes.length) { frag.appendChild(document.createTextNode('')); } return frag; }, // Convert a path like string into something that's ok for an `element` ID. toId: function (src) { return can.map(src.toString().split(/\/|\./g), function (part) { // Dont include empty strings in toId functions if (part) { return part; } }).join("_"); }, hookup: function (fragment, parentNode) { var hookupEls = [], id, func; // Get all `childNodes`. can.each(fragment.childNodes ? can.makeArray(fragment.childNodes) : fragment, function (node) { if (node.nodeType === 1) { hookupEls.push(node); hookupEls.push.apply(hookupEls, can.makeArray(node.getElementsByTagName('*'))); } }); // Filter by `data-view-id` attribute. can.each(hookupEls, function (el) { if (el.getAttribute && (id = el.getAttribute('data-view-id')) && (func = $view.hookups[id])) { func(el, parentNode, id); delete $view.hookups[id]; el.removeAttribute('data-view-id'); } }); return fragment; }, hookups: {}, hook: function (cb) { $view.hookups[++hookupId] = cb; return " data-view-id='" + hookupId + "'"; }, cached: {}, cachedRenderers: {}, cache: true, register: function (info) { this.types["." + info.suffix] = info; }, types: {}, ext: ".ejs", registerScript: function () {}, preload: function () {}, render: function (view, data, helpers, callback) { // If helpers is a `function`, it is actually a callback. if (isFunction(helpers)) { callback = helpers; helpers = undefined; } // See if we got passed any deferreds. var deferreds = getDeferreds(data); if (deferreds.length) { // Does data contain any deferreds? // The deferred that resolves into the rendered content... var deferred = new can.Deferred(), dataCopy = can.extend({}, data); // Add the view request to the list of deferreds. deferreds.push(get(view, true)) // Wait for the view and all deferreds to finish... can.when.apply(can, deferreds).then(function (resolved) { // Get all the resolved deferreds. var objs = makeArray(arguments), // Renderer is the last index of the data. renderer = objs.pop(), // The result of the template rendering with data. result; // Make data look like the resolved deferreds. if (can.isDeferred(data)) { dataCopy = usefulPart(resolved); } else { // Go through each prop in data again and // replace the defferreds with what they resolved to. for (var prop in data) { if (can.isDeferred(data[prop])) { dataCopy[prop] = usefulPart(objs.shift()); } } } // Get the rendered result. result = renderer(dataCopy, helpers); // Resolve with the rendered view. deferred.resolve(result, dataCopy); // If there's a `callback`, call it back with the result. callback && callback(result, dataCopy); }); // Return the deferred... return deferred; } else { // No deferreds! Render this bad boy. var response, // If there's a `callback` function async = isFunction(callback), // Get the `view` type deferred = get(view, async); // If we are `async`... if (async) { // Return the deferred response = deferred; // And fire callback with the rendered result. deferred.then(function (renderer) { callback(data ? 
renderer(data, helpers) : renderer); }) } else { // if the deferred is resolved, call the cached renderer instead // this is because it's possible, with recursive deferreds to // need to render a view while its deferred is _resolving_. A _resolving_ deferred // is a deferred that was just resolved and is calling back it's success callbacks. // If a new success handler is called while resoliving, it does not get fired by // jQuery's deferred system. So instead of adding a new callback // we use the cached renderer. // We also add __view_id on the deferred so we can look up it's cached renderer. // In the future, we might simply store either a deferred or the cached result. if (deferred.state() === "resolved" && deferred.__view_id) { var currentRenderer = $view.cachedRenderers[deferred.__view_id]; return data ? currentRenderer(data, helpers) : currentRenderer; } else { // Otherwise, the deferred is complete, so // set response to the result of the rendering. deferred.then(function (renderer) { response = data ? renderer(data, helpers) : renderer; }); } } return response; } }, registerView: function (id, text, type, def) { // Get the renderer function. var func = (type || $view.types[$view.ext]).renderer(id, text); def = def || new can.Deferred(); // Cache if we are caching. if ($view.cache) { $view.cached[id] = def; def.__view_id = id; $view.cachedRenderers[id] = func; } // Return the objects for the response's `dataTypes` // (in this case view). return def.resolve(func); } }); // Makes sure there's a template, if not, have `steal` provide a warning. var checkText = function (text, url) { if (!text.length) { throw "can.view: No template or empty template:" + url; } }, // `Returns a `view` renderer deferred. // `url` - The url to the template. // `async` - If the ajax request should be asynchronous. // Returns a deferred. get = function (url, async) { var suffix = url.match(/\.[\w\d]+$/), type, // If we are reading a script element for the content of the template, // `el` will be set to that script element. el, // A unique identifier for the view (used for caching). // This is typically derived from the element id or // the url for the template. id, // The ajax request used to retrieve the template content. jqXHR; //If the url has a #, we assume we want to use an inline template //from a script element and not current page's HTML if (url.match(/^#/)) { url = url.substr(1); } // If we have an inline template, derive the suffix from the `text/???` part. // This only supports `<script>` tags. if (el = document.getElementById(url)) { suffix = "." + el.type.match(/\/(x\-)?(.+)/)[2]; } // If there is no suffix, add one. if (!suffix && !$view.cached[url]) { url += (suffix = $view.ext); } if (can.isArray(suffix)) { suffix = suffix[0] } // Convert to a unique and valid id. id = $view.toId(url); // If an absolute path, use `steal` to get it. // You should only be using `//` if you are using `steal`. if (url.match(/^\/\//)) { var sub = url.substr(2); url = !window.steal ? sub : steal.config().root.mapJoin(sub); } // Set the template engine type. type = $view.types[suffix]; // If it is cached, if ($view.cached[id]) { // Return the cached deferred renderer. return $view.cached[id]; // Otherwise if we are getting this from a `<script>` element. } else if (el) { // Resolve immediately with the element's `innerHTML`. return $view.registerView(id, el.innerHTML, type); } else { // Make an ajax request for text. 
var d = new can.Deferred(); can.ajax({ async: async, url: url, dataType: "text", error: function (jqXHR) { checkText("", url); d.reject(jqXHR); }, success: function (text) { // Make sure we got some text back. checkText(text, url); $view.registerView(id, text, type, d) } }); return d; } }, // Gets an `array` of deferreds from an `object`. // This only goes one level deep. getDeferreds = function (data) { var deferreds = []; // pull out deferreds if (can.isDeferred(data)) { return [data] } else { for (var prop in data) { if (can.isDeferred(data[prop])) { deferreds.push(data[prop]); } } } return deferreds; }, // Gets the useful part of a resolved deferred. // This is for `model`s and `can.ajax` that resolve to an `array`. usefulPart = function (resolved) { return can.isArray(resolved) && resolved[1] === 'success' ? resolved[0] : resolved }; can.extend($view, { register: function (info) { this.types["." + info.suffix] = info; $view[info.suffix] = function (id, text) { if (!text) { // Return a nameless renderer var renderer = function () { return $view.frag(renderer.render.apply(this, arguments)); } renderer.render = function () { var renderer = info.renderer(null, id); return renderer.apply(renderer, arguments); } return renderer; } $view.preload(id, info.renderer(id, text)); return can.view(id); } }, registerScript: function (type, id, src) { return "can.view.preload('" + id + "'," + $view.types["." + type].script(id, src) + ");"; }, preload: function (id, renderer) { $view.cached[id] = new can.Deferred().resolve(function (data, helpers) { return renderer.call(data, data, helpers); }); function frag() { return $view.frag(renderer.apply(this, arguments)); } // expose the renderer for mustache frag.render = renderer; return frag; } }); return can; })(module["can/util/jquery/jquery.js"]); // ## can/view/scanner.js module['can/view/scanner.js'] = (function (can) { var newLine = /(\r|\n)+/g, tagToContentPropMap = { option: "textContent", textarea: "value" }, // Escapes characters starting with `\`. clean = function (content) { return content.split('\\').join("\\\\").split("\n").join("\\n").split('"').join('\\"').split("\t").join("\\t"); }, reverseTagMap = { tr: "tbody", option: "select", td: "tr", th: "tr", li: "ul" }, // Returns a tagName to use as a temporary placeholder for live content // looks forward ... could be slow, but we only do it when necessary getTag = function (tagName, tokens, i) { // if a tagName is provided, use that if (tagName) { return tagName; } else { // otherwise go searching for the next two tokens like "<",TAG while (i < tokens.length) { if (tokens[i] == "<" && reverseTagMap[tokens[i + 1]]) { return reverseTagMap[tokens[i + 1]]; } i++; } } }, bracketNum = function (content) { return (--content.split("{").length) - (--content.split("}").length); }, myEval = function (script) { eval(script); }, attrReg = /([^\s]+)[\s]*=[\s]*$/, // Commands for caching. startTxt = 'var ___v1ew = [];', finishTxt = "return ___v1ew.join('')", put_cmd = "___v1ew.push(", insert_cmd = put_cmd, // Global controls (used by other functions to know where we are). // Are we inside a tag? htmlTag = null, // Are we within a quote within a tag? quote = null, // What was the text before the current quote? (used to get the `attr` name) beforeQuote = null, // Whether a rescan is in progress rescan = null, // Used to mark where the element is. status = function () { // `t` - `1`. // `h` - `0`. // `q` - String `beforeQuote`. return quote ? "'" + beforeQuote.match(attrReg)[1] + "'" : (htmlTag ? 
1 : 0); }; can.view.Scanner = Scanner = function (options) { // Set options on self can.extend(this, { text: {}, tokens: [] }, options); // Cache a token lookup this.tokenReg = []; this.tokenSimple = { "<": "<", ">": ">", '"': '"', "'": "'" }; this.tokenComplex = []; this.tokenMap = {}; for (var i = 0, token; token = this.tokens[i]; i++) { // Save complex mappings (custom regexp) if (token[2]) { this.tokenReg.push(token[2]); this.tokenComplex.push({ abbr: token[1], re: new RegExp(token[2]), rescan: token[3] }); } // Save simple mappings (string only, no regexp) else { this.tokenReg.push(token[1]); this.tokenSimple[token[1]] = token[0]; } this.tokenMap[token[0]] = token[1]; } // Cache the token registry. this.tokenReg = new RegExp("(" + this.tokenReg.slice(0).concat(["<", ">", '"', "'"]).join("|") + ")", "g"); }; Scanner.prototype = { helpers: [ { name: /\s*\(([\$\w]+)\)\s*->([^\n]*)/, fn: function (content) { var quickFunc = /\s*\(([\$\w]+)\)\s*->([^\n]*)/, parts = content.match(quickFunc); return "function(__){var " + parts[1] + "=can.$(__);" + parts[2] + "}"; } }], scan: function (source, name) { var tokens = [], last = 0, simple = this.tokenSimple, complex = this.tokenComplex; source = source.replace(newLine, "\n"); source.replace(this.tokenReg, function (whole, part) { // offset is the second to last argument var offset = arguments[arguments.length - 2]; // if the next token starts after the last token ends // push what's in between if (offset > last) { tokens.push(source.substring(last, offset)); } // push the simple token (if there is one) if (simple[whole]) { tokens.push(whole); } // otherwise lookup complex tokens else { for (var i = 0, token; token = complex[i]; i++) { if (token.re.test(whole)) { tokens.push(token.abbr); // Push a rescan function if one exists if (token.rescan) { tokens.push(token.rescan(part)); } break; } } } // update the position of the last part of the last token last = offset + part.length; }); // if there's something at the end, add it if (last < source.length) { tokens.push(source.substr(last)); } var content = '', buff = [startTxt + (this.text.start || '')], // Helper `function` for putting stuff in the view concat. put = function (content, bonus) { buff.push(put_cmd, '"', clean(content), '"' + (bonus || '') + ');'); }, // A stack used to keep track of how we should end a bracket // `}`. // Once we have a `<%= %>` with a `leftBracket`, // we store how the file should end here (either `))` or `;`). endStack = [], // The last token, used to remember which tag we are in. lastToken, // The corresponding magic tag. startTag = null, // Was there a magic tag inside an html tag? magicInTag = false, // The current tag name. tagName = '', // stack of tagNames tagNames = [], // Declared here. bracketCount, i = 0, token, tmap = this.tokenMap; // Reinitialize the tag state goodness. htmlTag = quote = beforeQuote = null; for (; (token = tokens[i++]) !== undefined;) { if (startTag === null) { switch (token) { case tmap.left: case tmap.escapeLeft: case tmap.returnLeft: magicInTag = htmlTag && 1; case tmap.commentLeft: // A new line -- just add whatever content within a clean. // Reset everything. 
startTag = token; if (content.length) { put(content); } content = ''; break; case tmap.escapeFull: // This is a full line escape (a line that contains only whitespace and escaped logic) // Break it up into escape left and right magicInTag = htmlTag && 1; rescan = 1; startTag = tmap.escapeLeft; if (content.length) { put(content); } rescan = tokens[i++]; content = rescan.content || rescan; if (rescan.before) { put(rescan.before); } tokens.splice(i, 0, tmap.right); break; case tmap.commentFull: // Ignore full line comments. break; case tmap.templateLeft: content += tmap.left; break; case '<': // Make sure we are not in a comment. if (tokens[i].indexOf("!--") !== 0) { htmlTag = 1; magicInTag = 0; } content += token; break; case '>': htmlTag = 0; // content.substr(-1) doesn't work in IE7/8 var emptyElement = content.substr(content.length - 1) == "/"; // if there was a magic tag // or it's an element that has text content between its tags, // but content is not other tags add a hookup // TODO: we should only add `can.EJS.pending()` if there's a magic tag // within the html tags. if (magicInTag || tagToContentPropMap[tagNames[tagNames.length - 1]]) { // make sure / of /> is on the left of pending if (emptyElement) { put(content.substr(0, content.length - 1), ",can.view.pending(),\"/>\""); } else { put(content, ",can.view.pending(),\">\""); } content = ''; } else { content += token; } // if it's a tag like <input/> if (emptyElement) { // remove the current tag in the stack tagNames.pop(); // set the current tag to the previous parent tagName = tagNames[tagNames.length - 1]; } break; case "'": case '"': // If we are in an html tag, finding matching quotes. if (htmlTag) { // We have a quote and it matches. if (quote && quote === token) { // We are exiting the quote. quote = null; // Otherwise we are creating a quote. // TODO: does this handle `\`? } else if (quote === null) { quote = token; beforeQuote = lastToken; } } default: // Track the current tag if (lastToken === '<') { tagName = token.split(/\s/)[0]; if (tagName.indexOf("/") === 0 && tagNames.pop() === tagName.substr(1)) { // set tagName to the last tagName // if there are no more tagNames, we'll rely on getTag. tagName = tagNames[tagNames.length - 1]; } else { tagNames.push(tagName); } } content += token; break; } } else { // We have a start tag. switch (token) { case tmap.right: case tmap.returnRight: switch (startTag) { case tmap.left: // Get the number of `{ minus }` bracketCount = bracketNum(content); // We are ending a block. if (bracketCount == 1) { // We are starting on. buff.push(insert_cmd, "can.view.txt(0,'" + getTag(tagName, tokens, i) + "'," + status() + ",this,function(){", startTxt, content); endStack.push({ before: "", after: finishTxt + "}));\n" }); } else { // How are we ending this statement? last = // If the stack has value and we are ending a block... endStack.length && bracketCount == -1 ? // Use the last item in the block stack. endStack.pop() : // Or use the default ending. { after: ";" }; // If we are ending a returning block, // add the finish text which returns the result of the // block. if (last.before) { buff.push(last.before); } // Add the remaining content. buff.push(content, ";", last.after); } break; case tmap.escapeLeft: case tmap.returnLeft: // We have an extra `{` -> `block`. // Get the number of `{ minus }`. bracketCount = bracketNum(content); // If we have more `{`, it means there is a block. if (bracketCount) { // When we return to the same # of `{` vs `}` end with a `doubleParent`. 
endStack.push({ before: finishTxt, after: "}));" }); } var escaped = startTag === tmap.escapeLeft ? 1 : 0, commands = { insert: insert_cmd, tagName: getTag(tagName, tokens, i), status: status() }; for (var ii = 0; ii < this.helpers.length; ii++) { // Match the helper based on helper // regex name value var helper = this.helpers[ii]; if (helper.name.test(content)) { content = helper.fn(content, commands); // dont escape partials if (helper.name.source == /^>[\s]*\w*/.source) { escaped = 0; } break; } } // Handle special cases if (typeof content == 'object') { if (content.raw) { buff.push(content.raw); } } else { // If we have `<%== a(function(){ %>` then we want // `can.EJS.text(0,this, function(){ return a(function(){ var _v1ew = [];`. buff.push(insert_cmd, "can.view.txt(" + escaped + ",'" + tagName + "'," + status() + ",this,function(){ " + (this.text.escape || '') + "return ", content, // If we have a block. bracketCount ? // Start with startTxt `"var _v1ew = [];"`. startTxt : // If not, add `doubleParent` to close push and text. "}));"); } if (rescan && rescan.after && rescan.after.length) { put(rescan.after.length); rescan = null; } break; } startTag = null; content = ''; break; case tmap.templateLeft: content += tmap.left; break; default: content += token; break; } } lastToken = token; } // Put it together... if (content.length) { // Should be `content.dump` in Ruby. put(content); } buff.push(";"); var template = buff.join(''), out = { out: 'with(_VIEW) { with (_CONTEXT) {' + template + " " + finishTxt + "}}" }; // Use `eval` instead of creating a function, because it is easier to debug. myEval.call(out, 'this.fn = (function(_CONTEXT,_VIEW){' + out.out + '});\r\n//@ sourceURL=' + name + ".js"); return out; } }; return Scanner; })(module["can/view/view.js"]); // ## can/observe/compute/compute.js module['can/observe/compute/compute.js'] = (function (can) { // returns the // - observes and attr methods are called by func // - the value returned by func // ex: `{value: 100, observed: [{obs: o, attr: "completed"}]}` var getValueAndObserved = function (func, self) { var oldReading; if (can.Observe) { // Set a callback on can.Observe to know // when an attr is read. // Keep a reference to the old reader // if there is one. This is used // for nested live binding. oldReading = can.Observe.__reading; can.Observe.__reading = function (obj, attr) { // Add the observe and attr that was read // to `observed` observed.push({ obj: obj, attr: attr }); }; } var observed = [], // Call the "wrapping" function to get the value. `observed` // will have the observe/attribute pairs that were read. value = func.call(self); // Set back so we are no longer reading. if (can.Observe) { can.Observe.__reading = oldReading; } return { value: value, observed: observed }; }, // Calls `callback(newVal, oldVal)` everytime an observed property // called within `getterSetter` is changed and creates a new result of `getterSetter`. // Also returns an object that can teardown all event handlers. 
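// (Editor's note, not part of the original source: a minimal illustrative sketch of how the
// computeBinder / can.compute pair defined below is typically used. The observe name
// `person` is hypothetical.)
//
//     var person = new can.Observe({ first: "Justin", last: "Meyer" });
//     var fullName = can.compute(function () {
//         return person.attr("first") + " " + person.attr("last");
//     });
//     fullName();                        // reads both attrs -> "Justin Meyer"
//     fullName.bind("change", function (ev, newVal, oldVal) {
//         // re-runs the getter and fires whenever `first` or `last` changes
//     });
//     person.attr("first", "Brian");     // triggers the "change" handler above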
computeBinder = function (getterSetter, context, callback) { // track what we are observing var observing = {}, // a flag indicating if this observe/attr pair is already bound matched = true, // the data to return data = { // we will maintain the value while live-binding is taking place value: undefined, // a teardown method that stops listening teardown: function () { for (var name in observing) { var ob = observing[name]; ob.observe.obj.unbind(ob.observe.attr, onchanged); delete observing[name]; } } }, batchNum; // when a property value is changed var onchanged = function (ev) { if (ev.batchNum === undefined || ev.batchNum !== batchNum) { // store the old value var oldValue = data.value, // get the new value newvalue = getValueAndBind(); // update the value reference (in case someone reads) data.value = newvalue; // if a change happened if (newvalue !== oldValue) { callback(newvalue, oldValue); } batchNum = batchNum = ev.batchNum; } }; // gets the value returned by `getterSetter` and also binds to any attributes // read by the call var getValueAndBind = function () { var info = getValueAndObserved(getterSetter, context), newObserveSet = info.observed; var value = info.value; matched = !matched; // go through every attribute read by this observe can.each(newObserveSet, function (ob) { // if the observe/attribute pair is being observed if (observing[ob.obj._cid + "|" + ob.attr]) { // mark at as observed observing[ob.obj._cid + "|" + ob.attr].matched = matched; } else { // otherwise, set the observe/attribute on oldObserved, marking it as being observed observing[ob.obj._cid + "|" + ob.attr] = { matched: matched, observe: ob }; ob.obj.bind(ob.attr, onchanged); } }); // Iterate through oldObserved, looking for observe/attributes // that are no longer being bound and unbind them for (var name in observing) { var ob = observing[name]; if (ob.matched !== matched) { ob.observe.obj.unbind(ob.observe.attr, onchanged); delete observing[name]; } } return value; }; // set the initial value data.value = getValueAndBind(); data.isListening = !can.isEmptyObject(observing); return data; } // if no one is listening ... we can not calculate every time can.compute = function (getterSetter, context) { if (getterSetter && getterSetter.isComputed) { return getterSetter; } // get the value right away // TODO: eventually we can defer this until a bind or a read var computedData, bindings = 0, computed, canbind = true; if (typeof getterSetter === "function") { computed = function (value) { if (value === undefined) { // we are reading if (computedData) { // If another compute is calling this compute for the value, // it needs to bind to this compute's change so it will re-compute // and re-bind when this compute changes. if (bindings && can.Observe.__reading) { can.Observe.__reading(computed, 'change'); } return computedData.value; } else { return getterSetter.call(context || this) } } else { return getterSetter.apply(context || this, arguments) } } } else { // we just gave it a value computed = function (val) { if (val === undefined) { // If observing, record that the value is being read. 
if (can.Observe.__reading) { can.Observe.__reading(computed, 'change'); } return getterSetter; } else { var old = getterSetter; getterSetter = val; if (old !== val) { can.Observe.triggerBatch(computed, "change", [val, old]); } return val; } } canbind = false; } computed.isComputed = true; can.cid(computed, "compute") computed.bind = function (ev, handler) { can.addEvent.apply(computed, arguments); if (bindings === 0 && canbind) { // setup live-binding computedData = computeBinder(getterSetter, context || this, function (newValue, oldValue) { can.Observe.triggerBatch(computed, "change", [newValue, oldValue]) }); } bindings++; } computed.unbind = function (ev, handler) { can.removeEvent.apply(computed, arguments); bindings--; if (bindings === 0 && canbind) { computedData.teardown(); } }; return computed; }; can.compute.binder = computeBinder; return can.compute; })(module["can/util/jquery/jquery.js"]); // ## can/view/render.js module['can/view/render.js'] = (function (can) { // text node expando test var canExpando = true; try { document.createTextNode('')._ = 0; } catch (ex) { canExpando = false; } var attrMap = { "class": "className", "value": "value", "innerText": "innerText", "textContent": "textContent" }, tagMap = { "": "span", table: "tbody", tr: "td", ol: "li", ul: "li", tbody: "tr", thead: "tr", tfoot: "tr", select: "option", optgroup: "option" }, attributePlaceholder = '__!!__', attributeReplace = /__!!__/g, tagToContentPropMap = { option: "textContent" in document.createElement("option") ? "textContent" : "innerText", textarea: "value" }, bool = can.each(["checked", "disabled", "readonly", "required"], function (n) { attrMap[n] = n; }), // a helper to get the parentNode for a given element el // if el is in a documentFragment, it will return defaultParentNode getParentNode = function (el, defaultParentNode) { return defaultParentNode && el.parentNode.nodeType === 11 ? defaultParentNode : el.parentNode; }, setAttr = function (el, attrName, val) { var tagName = el.nodeName.toString().toLowerCase(), prop = attrMap[attrName]; // if this is a special property if (prop) { // set the value as true / false el[prop] = can.inArray(attrName, bool) > -1 ? true : val; if (prop === "value" && (tagName === "input" || tagName === "textarea")) { el.defaultValue = val; } } else { el.setAttribute(attrName, val); } }, getAttr = function (el, attrName) { // Default to a blank string for IE7/8 return (attrMap[attrName] ? el[attrMap[attrName]] : el.getAttribute(attrName)) || ''; }, removeAttr = function (el, attrName) { if (can.inArray(attrName, bool) > -1) { el[attrName] = false; } else { el.removeAttribute(attrName); } }, pendingHookups = [], // Returns text content for anything other than a live-binding contentText = function (input) { // If it's a string, return. if (typeof input == 'string') { return input; } // If has no value, return an empty string. if (!input && input !== 0) { return ''; } // If it's an object, and it has a hookup method. var hook = (input.hookup && // Make a function call the hookup method. function (el, id) { input.hookup.call(input, el, id); }) || // Or if it's a `function`, just use the input. (typeof input == 'function' && input); // Finally, if there is a `function` to hookup on some dom, // add it to pending hookups. if (hook) { pendingHookups.push(hook); return ''; } // Finally, if all else is `false`, `toString()` it. 
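// (Editor's note, illustrative only - sample inputs/outputs for contentText:)
//     contentText("a")  -> "a"     (plain strings pass through)
//     contentText(null) -> ""      (no value and not 0)
//     contentText(0)    -> "0"     (falls through to the string coercion below)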
return "" + input; }, // Returns escaped/sanatized content for anything other than a live-binding contentEscape = function (txt) { return (typeof txt == 'string' || typeof txt == 'number') ? can.esc(txt) : contentText(txt); }, // a mapping of element ids to nodeList ids nodeMap = {}, // a mapping of ids to text nodes textNodeMap = {}, // a mapping of nodeList ids to nodeList nodeListMap = {}, expando = "ejs_" + Math.random(), _id = 0, id = function (node) { if (canExpando || node.nodeType !== 3) { if (node[expando]) { return node[expando]; } else { return node[expando] = (node.nodeName ? "element_" : "obj_") + (++_id); } } else { for (var textNodeID in textNodeMap) { if (textNodeMap[textNodeID] === node) { return textNodeID; } } textNodeMap["text_" + (++_id)] = node; return "text_" + _id; } }, // removes a nodeListId from a node's nodeListIds removeNodeListId = function (node, nodeListId) { var nodeListIds = nodeMap[id(node)]; if (nodeListIds) { var index = can.inArray(nodeListId, nodeListIds); if (index >= 0) { nodeListIds.splice(index, 1); } if (!nodeListIds.length) { delete nodeMap[id(node)]; } } }, addNodeListId = function (node, nodeListId) { var nodeListIds = nodeMap[id(node)]; if (!nodeListIds) { nodeListIds = nodeMap[id(node)] = []; } nodeListIds.push(nodeListId); }, tagChildren = function (tagName) { var newTag = tagMap[tagName] || "span"; if (newTag === "span") { //innerHTML in IE doesn't honor leading whitespace after empty elements return "@@!!@@"; } return "<" + newTag + ">" + tagChildren(newTag) + "</" + newTag + ">"; }; can.extend(can.view, { pending: function () { // TODO, make this only run for the right tagName var hooks = pendingHookups.slice(0); lastHookups = hooks; pendingHookups = []; return can.view.hook(function (el) { can.each(hooks, function (fn) { fn(el); }); }); }, registerNode: function (nodeList) { var nLId = id(nodeList); nodeListMap[nLId] = nodeList; can.each(nodeList, function (node) { addNodeListId(node, nLId); }); }, unregisterNode: function (nodeList) { var nLId = id(nodeList); can.each(nodeList, function (node) { removeNodeListId(node, nLId); }); delete nodeListMap[nLId]; }, txt: function (escape, tagName, status, self, func) { // call the "wrapping" function and get the binding information var binding = can.compute.binder(func, self, function (newVal, oldVal) { // call the update method we will define for each // type of attribute update(newVal, oldVal); }); // If we had no observes just return the value returned by func. if (!binding.isListening) { return (escape || status !== 0 ? contentEscape : contentText)(binding.value); } // The following are helper methods or varaibles that will // be defined by one of the various live-updating schemes. // The parent element we are listening to for teardown var parentElement, nodeList, teardown = function () { binding.teardown(); if (nodeList) { can.view.unregisterNode(nodeList); } }, // if the parent element is removed, teardown the binding setupTeardownOnDestroy = function (el) { can.bind.call(el, 'destroyed', teardown); parentElement = el; }, // if there is no parent, undo bindings teardownCheck = function (parent) { if (!parent) { teardown(); can.unbind.call(parentElement, 'destroyed', teardown); } }, // the tag type to insert tag = (tagMap[tagName] || "span"), // this will be filled in if binding.isListening update, // the property (instead of innerHTML elements) to adjust. For // example options should use textContent contentProp = tagToContentPropMap[tagName]; // The magic tag is outside or between tags. 
if (status === 0 && !contentProp) { // Return an element tag with a hookup in place of the content return "<" + tag + can.view.hook( escape ? // If we are escaping, replace the parentNode with // a text node who's value is `func`'s return value. function (el, parentNode) { // updates the text of the text node update = function (newVal) { node.nodeValue = "" + newVal; teardownCheck(node.parentNode); }; var parent = getParentNode(el, parentNode), node = document.createTextNode(binding.value); parent.insertBefore(node, el); parent.removeChild(el); setupTeardownOnDestroy(parent); } : // If we are not escaping, replace the parentNode with a // documentFragment created as with `func`'s return value. function (span, parentNode) { // updates the elements with the new content update = function (newVal) { // is this still part of the DOM? var attached = nodes[0].parentNode; // update the nodes in the DOM with the new rendered value if (attached) { makeAndPut(newVal); } teardownCheck(nodes[0].parentNode); }; // make sure we have a valid parentNode parentNode = getParentNode(span, parentNode); // A helper function to manage inserting the contents // and removing the old contents var nodes, makeAndPut = function (val) { // create the fragment, but don't hook it up // we need to insert it into the document first var frag = can.view.frag(val, parentNode), // keep a reference to each node newNodes = can.makeArray(frag.childNodes), last = nodes ? nodes[nodes.length - 1] : span; // Insert it in the `document` or `documentFragment` if (last.nextSibling) { last.parentNode.insertBefore(frag, last.nextSibling); } else { last.parentNode.appendChild(frag); } // nodes hasn't been set yet if (!nodes) { can.remove(can.$(span)); nodes = newNodes; // set the teardown nodeList nodeList = nodes; can.view.registerNode(nodes); } else { // Update node Array's to point to new nodes // and then remove the old nodes. // It has to be in this order for Mootools // and IE because somehow, after an element // is removed from the DOM, it loses its // expando values. var nodesToRemove = can.makeArray(nodes); can.view.replace(nodes, newNodes); can.remove(can.$(nodesToRemove)); } }; // nodes are the nodes that any updates will replace // at this point, these nodes could be part of a documentFragment makeAndPut(binding.value, [span]); setupTeardownOnDestroy(parentNode); //children have to be properly nested HTML for buildFragment to work properly }) + ">" + tagChildren(tag) + "</" + tag + ">"; // In a tag, but not in an attribute } else if (status === 1) { // remember the old attr name var attrName = binding.value.replace(/['"]/g, '').split('=')[0]; pendingHookups.push(function (el) { update = function (newVal) { var parts = (newVal || "").replace(/['"]/g, '').split('='), newAttrName = parts[0]; // Remove if we have a change and used to have an `attrName`. if ((newAttrName != attrName) && attrName) { removeAttr(el, attrName); } // Set if we have a new `attrName`. if (newAttrName) { setAttr(el, newAttrName, parts[1]); attrName = newAttrName; } }; setupTeardownOnDestroy(el); }); return binding.value; } else { // In an attribute... var attributeName = status === 0 ? contentProp : status; // if the magic tag is inside the element, like `<option><% TAG %></option>`, // we add this hookup to the last element (ex: `option`'s) hookups. // Otherwise, the magic tag is in an attribute, just add to the current element's // hookups. (status === 0 ? 
lastHookups : pendingHookups).push(function (el) { // update will call this attribute's render method // and set the attribute accordingly update = function () { setAttr(el, attributeName, hook.render(), contentProp); }; var wrapped = can.$(el), hooks; // Get the list of hookups or create one for this element. // Hooks is a map of attribute names to hookup `data`s. // Each hookup data has: // `render` - A `function` to render the value of the attribute. // `funcs` - A list of hookup `function`s on that attribute. // `batchNum` - The last event `batchNum`, used for performance. hooks = can.data(wrapped, 'hooks'); if (!hooks) { can.data(wrapped, 'hooks', hooks = {}); } // Get the attribute value. var attr = getAttr(el, attributeName, contentProp), // Split the attribute value by the template. // Only split out the first __!!__ so if we have multiple hookups in the same attribute, // they will be put in the right spot on first render parts = attr.split(attributePlaceholder), goodParts = [], hook; goodParts.push(parts.shift(), parts.join(attributePlaceholder)); // If we already had a hookup for this attribute... if (hooks[attributeName]) { // Just add to that attribute's list of `function`s. hooks[attributeName].bindings.push(binding); } else { // Create the hookup data. hooks[attributeName] = { render: function () { var i = 0, newAttr = attr.replace(attributeReplace, function () { return contentText(hook.bindings[i++].value); }); return newAttr; }, bindings: [binding], batchNum: undefined }; } // Save the hook for slightly faster performance. hook = hooks[attributeName]; // Insert the value in parts. goodParts.splice(1, 0, binding.value); // Set the attribute. setAttr(el, attributeName, goodParts.join(""), contentProp); // Bind on change. //liveBind(observed, el, binder,oldObserved); setupTeardownOnDestroy(el); }); return attributePlaceholder; } }, replace: function (oldNodeList, newNodes) { // for each node in the node list oldNodeList = can.makeArray(oldNodeList); can.each(oldNodeList, function (node) { // for each nodeList the node is in can.each(can.makeArray(nodeMap[id(node)]), function (nodeListId) { var nodeList = nodeListMap[nodeListId], startIndex = can.inArray(node, nodeList), endIndex = can.inArray(oldNodeList[oldNodeList.length - 1], nodeList); // remove this nodeListId from each node if (startIndex >= 0 && endIndex >= 0) { for (var i = startIndex; i <= endIndex; i++) { var n = nodeList[i]; removeNodeListId(n, nodeListId); } // swap in new nodes into the nodeLIst nodeList.splice.apply(nodeList, [startIndex, endIndex - startIndex + 1].concat(newNodes)); // tell these new nodes they belong to the nodeList can.each(newNodes, function (node) { addNodeListId(node, nodeListId); }); } else { can.view.unregisterNode(nodeList); } }); }); }, canExpando: canExpando, // Node mappings textNodeMap: textNodeMap, nodeMap: nodeMap, nodeListMap: nodeListMap }); return can; })(module["can/view/view.js"], module["can/util/string/string.js"]); // ## can/view/mustache/mustache.js module['can/view/mustache/mustache.js'] = (function (can) { // # mustache.js // `can.Mustache`: The Mustache templating engine. // See the [Transformation](#section-29) section within *Scanning Helpers* for a detailed explanation // of the runtime render code design. The majority of the Mustache engine implementation // occurs within the *Transformation* scanning helper. // ## Initialization // Define the view extension. can.view.ext = ".mustache"; // ### Setup internal helper variables and functions. 
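// (Editor's aside, not part of the original source: a minimal sketch of how the engine
// defined in this module is typically invoked; the template text and data are hypothetical.)
//
//     var renderer = can.Mustache({ text: "<h1>{{message}}</h1>" });
//     var html = renderer({ message: "Hello" });  // "<h1>Hello</h1>" plus live-binding hookups
//
//     // Custom helpers are registered once and looked up at render time:
//     can.Mustache.registerHelper("upper", function (str) {
//         return ("" + str).toUpperCase();
//     });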
// An alias for the context variable used for tracking a stack of contexts. // This is also used for passing to helper functions to maintain proper context. var CONTEXT = '___c0nt3xt', // An alias for the variable used for the hash object that can be passed // to helpers via `options.hash`. HASH = '___h4sh', // An alias for the function that adds a new context to the context stack. STACK = '___st4ck', // An alias for the most used context stacking call. CONTEXT_STACK = STACK + '(' + CONTEXT + ',this)', CONTEXT_OBJ = '{context:' + CONTEXT_STACK + ',options:options}', isObserve = function (obj) { return obj !== null && can.isFunction(obj.attr) && obj.constructor && !! obj.constructor.canMakeObserve; }, isArrayLike = function (obj) { return obj && obj.splice && typeof obj.length == 'number'; }, // ## Mustache Mustache = function (options, helpers) { // Support calling Mustache without the constructor. // This returns a function that renders the template. if (this.constructor != Mustache) { var mustache = new Mustache(options); return function (data, options) { return mustache.render(data, options); }; } // If we get a `function` directly, it probably is coming from // a `steal`-packaged view. if (typeof options == "function") { this.template = { fn: options }; return; } // Set options on self. can.extend(this, options); this.template = this.scanner.scan(this.text, this.name); }; // Put Mustache on the `can` object. can.Mustache = window.Mustache = Mustache; Mustache.prototype. render = function (object, options) { object = object || {}; options = options || {}; if (!options.helpers && !options.partials) { options.helpers = options; } return this.template.fn.call(object, object, { _data: object, options: options }); }; can.extend(Mustache.prototype, { // Share a singleton scanner for parsing templates. scanner: new can.view.Scanner({ // A hash of strings for the scanner to inject at certain points. text: { // This is the logic to inject at the beginning of a rendered template. // This includes initializing the `context` stack. start: 'var ' + CONTEXT + ' = this && this.' + STACK + ' ? this : []; ' + CONTEXT + '.' + STACK + ' = true;' + 'var ' + STACK + ' = function(context, self) {' + 'var s;' + 'if (arguments.length == 1 && context) {' + 's = !context.' + STACK + ' ? [context] : context;' + // Handle helpers with custom contexts (#228) '} else if (!context.' + STACK + ') {' + 's = [self, context];' + '} else {' + 's = context && context.' + STACK + ' ? context.concat([self]) : ' + STACK + '(context).concat([self]);' + '}' + 'return (s.' + STACK + ' = true) && s;' + '};' }, // An ordered token registry for the scanner. // This needs to be ordered by priority to prevent token parsing errors. // Each token follows the following structure: // [ // // Which key in the token map to match. // "tokenMapName", // // A simple token to match, like "{{". // "token", // // Optional. A complex (regexp) token to match that // // overrides the simple token. // "[\\s\\t]*{{", // // Optional. A function that executes advanced // // manipulation of the matched content. This is // // rarely used. 
// function(content){ // return content; // } // ] tokens: [ // Return unescaped ["returnLeft", "{{{", "{{[{&]"], // Full line comments ["commentFull", "{{!}}", "^[\\s\\t]*{{!.+?}}\\n"], // Inline comments ["commentLeft", "{{!", "(\\n[\\s\\t]*{{!|{{!)"], // Full line escapes // This is used for detecting lines with only whitespace and an escaped tag ["escapeFull", "{{}}", "(^[\\s\\t]*{{[#/^][^}]+?}}\\n|\\n[\\s\\t]*{{[#/^][^}]+?}}\\n|\\n[\\s\\t]*{{[#/^][^}]+?}}$)", function (content) { return { before: /^\n.+?\n$/.test(content) ? '\n' : '', content: content.match(/\{\{(.+?)\}\}/)[1] || '' }; }], // Return escaped ["escapeLeft", "{{"], // Close return unescaped ["returnRight", "}}}"], // Close tag ["right", "}}"]], // ## Scanning Helpers // This is an array of helpers that transform content that is within escaped tags like `{{token}}`. These helpers are solely for the scanning phase; they are unrelated to Mustache/Handlebars helpers which execute at render time. Each helper has a definition like the following: // { // // The content pattern to match in order to execute. // // Only the first matching helper is executed. // name: /pattern to match/, // // The function to transform the content with. // // @param {String} content The content to transform. // // @param {Object} cmd Scanner helper data. // // { // // insert: "insert command", // // tagName: "div", // // status: 0 // // } // fn: function(content, cmd) { // return 'for text injection' || // { raw: 'to bypass text injection' }; // } // } helpers: [ // ### Partials // Partials begin with a greater than sign, like {{> box}}. // Partials are rendered at runtime (as opposed to compile time), // so recursive partials are possible. Just avoid infinite loops. // For example, this template and partial: // base.mustache: // <h2>Names</h2> // {{#names}} // {{> user}} // {{/names}} // user.mustache: // <strong>{{name}}</strong> { name: /^>[\s]*\w*/, fn: function (content, cmd) { // Get the template name and call back into the render method, // passing the name and the current context. var templateName = can.trim(content.replace(/^>\s?/, '')).replace(/["|']/g, ""); return "options.partials && options.partials['" + templateName + "'] ? can.Mustache.renderPartial(options.partials['" + templateName + "']," + CONTEXT_STACK + ".pop(),options) : can.Mustache.render('" + templateName + "', " + CONTEXT_STACK + ")"; } }, // ### Data Hookup // This will attach the data property of `this` to the element // its found on using the first argument as the data attribute // key. // For example: // <li id="nameli" {{ data 'name' }}></li> // then later you can access it like: // can.$('#nameli').data('name'); { name: /^\s*data\s/, fn: function (content, cmd) { var attr = content.match(/["|'](.*)["|']/)[1]; // return a function which calls `can.data` on the element // with the attribute name with the current context. return "can.proxy(function(__){can.data(can.$(__),'" + attr + "', this.pop()); }, " + CONTEXT_STACK + ")"; } }, // ### Transformation (default) // This transforms all content to its interpolated equivalent, // including calls to the corresponding helpers as applicable. // This outputs the render code for almost all cases. // #### Definitions // * `context` - This is the object that the current rendering context operates within. // Each nested template adds a new `context` to the context stack. // * `stack` - Mustache supports nested sections, // each of which add their own context to a stack of contexts. 
// Whenever a token gets interpolated, it will check for a match against the // last context in the stack, then iterate through the rest of the stack checking for matches. // The first match is the one that gets returned. // * `Mustache.txt` - This serializes a collection of logic, optionally contained within a section. // If this is a simple interpolation, only the interpolation lookup will be passed. // If this is a section, then an `options` object populated by the truthy (`options.fn`) and // falsey (`options.inverse`) encapsulated functions will also be passed. This section handling // exists to support the runtime context nesting that Mustache supports. // * `Mustache.get` - This resolves an interpolation reference given a stack of contexts. // * `options` - An object containing methods for executing the inner contents of sections or helpers. // `options.fn` - Contains the inner template logic for a truthy section. // `options.inverse` - Contains the inner template logic for a falsey section. // `options.hash` - Contains the merged hash object argument for custom helpers. // #### Design // This covers the design of the render code that the transformation helper generates. // ##### Pseudocode // A detailed explanation is provided in the following sections, but here is some brief pseudocode // that gives a high level overview of what the generated render code does (with a template similar to // `"{{#a}}{{b.c.d.e.name}}{{/a}}" == "Phil"`). // *Initialize the render code.* // view = [] // context = [] // stack = fn { context.concat([this]) } // *Render the root section.* // view.push( "string" ) // view.push( can.view.txt( // *Render the nested section with `can.Mustache.txt`.* // txt( // *Add the current context to the stack.* // stack(), // *Flag this for truthy section mode.* // "#", // *Interpolate and check the `a` variable for truthyness using the stack with `can.Mustache.get`.* // get( "a", stack() ), // *Include the nested section's inner logic. // The stack argument is usually the parent section's copy of the stack, // but it can be an override context that was passed by a custom helper. // Sections can nest `0..n` times -- **NESTCEPTION**.* // { fn: fn(stack) { // *Render the nested section (everything between the `{{#a}}` and `{{/a}}` tokens).* // view = [] // view.push( "string" ) // view.push( // *Add the current context to the stack.* // stack(), // *Flag this as interpolation-only mode.* // null, // *Interpolate the `b.c.d.e.name` variable using the stack.* // get( "b.c.d.e.name", stack() ), // ) // view.push( "string" ) // *Return the result for the nested section.* // return view.join() // }} // ) // )) // view.push( "string" ) // *Return the result for the root section, which includes all nested sections.* // return view.join() // ##### Initialization // Each rendered template is started with the following initialization code: // var ___v1ew = []; // var ___c0nt3xt = []; // ___c0nt3xt.___st4ck = true; // var ___st4ck = function(context, self) { // var s; // if (arguments.length == 1 && context) { // s = !context.___st4ck ? [context] : context; // } else { // s = context && context.___st4ck // ? context.concat([self]) // : ___st4ck(context).concat([self]); // } // return (s.___st4ck = true) && s; // }; // The `___v1ew` is the the array used to serialize the view. // The `___c0nt3xt` is a stacking array of contexts that slices and expands with each nested section. // The `___st4ck` function is used to more easily update the context stack in certain situations. 
// Usually, the stack function simply adds a new context (`self`/`this`) to a context stack. // However, custom helpers will occasionally pass override contexts that need their own context stack. // ##### Sections // Each section, `{{#section}} content {{/section}}`, within a Mustache template generates a section // context in the resulting render code. The template itself is treated like a root section, with the // same execution logic as any others. Each section can have `0..n` nested sections within it. // Here's an example of a template without any descendent sections. // Given the template: `"{{a.b.c.d.e.name}}" == "Phil"` // Would output the following render code: // ___v1ew.push("\""); // ___v1ew.push(can.view.txt(1, '', 0, this, function() { // return can.Mustache.txt(___st4ck(___c0nt3xt, this), null, // can.Mustache.get("a.b.c.d.e.name", // ___st4ck(___c0nt3xt, this)) // ); // })); // ___v1ew.push("\" == \"Phil\""); // The simple strings will get appended to the view. Any interpolated references (like `{{a.b.c.d.e.name}}`) // will be pushed onto the view via `can.view.txt` in order to support live binding. // The function passed to `can.view.txt` will call `can.Mustache.txt`, which serializes the object data by doing // a context lookup with `can.Mustache.get`. // `can.Mustache.txt`'s first argument is a copy of the context stack with the local context `this` added to it. // This stack will grow larger as sections nest. // The second argument is for the section type. This will be `"#"` for truthy sections, `"^"` for falsey, // or `null` if it is an interpolation instead of a section. // The third argument is the interpolated value retrieved with `can.Mustache.get`, which will perform the // context lookup and return the approriate string or object. // Any additional arguments, if they exist, are used for passing arguments to custom helpers. // For nested sections, the last argument is an `options` object that contains the nested section's logic. // Here's an example of a template with a single nested section. // Given the template: `"{{#a}}{{b.c.d.e.name}}{{/a}}" == "Phil"` // Would output the following render code: // ___v1ew.push("\""); // ___v1ew.push(can.view.txt(0, '', 0, this, function() { // return can.Mustache.txt(___st4ck(___c0nt3xt, this), "#", // can.Mustache.get("a", ___st4ck(___c0nt3xt, this)), // [{ // _: function() { // return ___v1ew.join(""); // } // }, { // fn: function(___c0nt3xt) { // var ___v1ew = []; // ___v1ew.push(can.view.txt(1, '', 0, this, // function() { // return can.Mustache.txt( // ___st4ck(___c0nt3xt, this), // null, // can.Mustache.get("b.c.d.e.name", // ___st4ck(___c0nt3xt, this)) // ); // } // )); // return ___v1ew.join(""); // } // }] // ) // })); // ___v1ew.push("\" == \"Phil\""); // This is specified as a truthy section via the `"#"` argument. The last argument includes an array of helper methods used with `options`. // These act similarly to custom helpers: `options.fn` will be called for truthy sections, `options.inverse` will be called for falsey sections. // The `options._` function only exists as a dummy function to make generating the section nesting easier (a section may have a `fn`, `inverse`, // or both, but there isn't any way to determine that at compilation time). // Within the `fn` function is the section's render context, which in this case will render anything between the `{{#a}}` and `{{/a}}` tokens. // This function has `___c0nt3xt` as an argument because custom helpers can pass their own override contexts. 
For any case where custom helpers // aren't used, `___c0nt3xt` will be equivalent to the `___st4ck(___c0nt3xt, this)` stack created by its parent section. The `inverse` function // works similarly, except that it is added when `{{^a}}` and `{{else}}` are used. `var ___v1ew = []` is specified in `fn` and `inverse` to // ensure that live binding in nested sections works properly. // All of these nested sections will combine to return a compiled string that functions similar to EJS in its uses of `can.view.txt`. // #### Implementation { name: /^.*$/, fn: function (content, cmd) { var mode = false, result = []; // Trim the content so we don't have any trailing whitespace. content = can.trim(content); // Determine what the active mode is. // * `#` - Truthy section // * `^` - Falsey section // * `/` - Close the prior section // * `else` - Inverted section (only exists within a truthy/falsey section) if (content.length && (mode = content.match(/^([#^/]|else$)/))) { mode = mode[0]; switch (mode) { // Open a new section. case '#': case '^': result.push(cmd.insert + 'can.view.txt(0,\'' + cmd.tagName + '\',' + cmd.status + ',this,function(){ return '); break; // Close the prior section. case '/': return { raw: 'return ___v1ew.join("");}}])}));' }; break; } // Trim the mode off of the content. content = content.substring(1); } // `else` helpers are special and should be skipped since they don't // have any logic aside from kicking off an `inverse` function. if (mode != 'else') { var args = [], i = 0, hashing = false, arg, split, m; // Parse the helper arguments. // This needs uses this method instead of a split(/\s/) so that // strings with spaces can be correctly parsed. (can.trim(content) + ' ').replace(/((([^\s]+?=)?('.*?'|".*?"))|.*?)\s/g, function (whole, part) { args.push(part); }); // Start the content render block. result.push('can.Mustache.txt(' + CONTEXT_OBJ + ',' + (mode ? '"' + mode + '"' : 'null') + ','); // Iterate through the helper arguments, if there are any. for (; arg = args[i]; i++) { i && result.push(','); // Check for special helper arguments (string/number/boolean/hashes). if (i && (m = arg.match(/^(('.*?'|".*?"|[0-9.]+|true|false)|((.+?)=(('.*?'|".*?"|[0-9.]+|true|false)|(.+))))$/))) { // Found a native type like string/number/boolean. if (m[2]) { result.push(m[0]); } // Found a hash object. else { // Open the hash object. if (!hashing) { hashing = true; result.push('{' + HASH + ':{'); } // Add the key/value. result.push(m[4], ':', m[6] ? m[6] : 'can.Mustache.get("' + m[5].replace(/"/g, '\\"') + '",' + CONTEXT_OBJ + ')'); // Close the hash if this was the last argument. if (i == args.length - 1) { result.push('}}'); } } } // Otherwise output a normal interpolation reference. else { result.push('can.Mustache.get("' + // Include the reference name. arg.replace(/"/g, '\\"') + '",' + // Then the stack of context. CONTEXT_OBJ + // Flag as a helper method to aid performance, // if it is a known helper (anything with > 0 arguments). (i == 0 && args.length > 1 ? ',true' : ',false') + (i > 0 ? ',true' : ',false') + ')'); } } } // Create an option object for sections of code. 
mode && mode != 'else' && result.push(',[{_:function(){'); switch (mode) { // Truthy section case '#': result.push('return ___v1ew.join("");}},{fn:function(' + CONTEXT + '){var ___v1ew = [];'); break; // If/else section // Falsey section case 'else': case '^': result.push('return ___v1ew.join("");}},{inverse:function(' + CONTEXT + '){var ___v1ew = [];'); break; // Not a section default: result.push(');'); break; } // Return a raw result if there was a section, otherwise return the default string. result = result.join(''); return mode ? { raw: result } : result; } }] }) }); // Add in default scanner helpers first. // We could probably do this differently if we didn't 'break' on every match. var helpers = can.view.Scanner.prototype.helpers; for (var i = 0; i < helpers.length; i++) { Mustache.prototype.scanner.helpers.unshift(helpers[i]); }; Mustache.txt = function (context, mode, name) { // Grab the extra arguments to pass to helpers. var args = Array.prototype.slice.call(arguments, 3), // Create a default `options` object to pass to the helper. options = can.extend.apply(can, [{ fn: function () {}, inverse: function () {} }].concat(mode ? args.pop() : [])); var extra = {}; if (context.context) { extra = context.options; context = context.context; } // Check for a registered helper or a helper-like function. if (helper = (Mustache.getHelper(name, extra) || (can.isFunction(name) && !name.isComputed && { fn: name }))) { // Use the most recent context as `this` for the helper. var context = (context[STACK] && context[context.length - 1]) || context, // Update the options with a function/inverse (the inner templates of a section). opts = { fn: can.proxy(options.fn, context), inverse: can.proxy(options.inverse, context) }, lastArg = args[args.length - 1]; // Add the hash to `options` if one exists if (lastArg && lastArg[HASH]) { opts.hash = args.pop()[HASH]; } args.push(opts); // Call the helper. return helper.fn.apply(context, args) || ''; } // if a compute, get the value if (can.isFunction(name) && name.isComputed) { name = name(); } // An array of arguments to check for truthyness when evaluating sections. var validArgs = args.length ? args : [name], // Whether the arguments meet the condition of the section. valid = true, result = [], i, helper, argIsObserve, arg; // Validate the arguments based on the section mode. if (mode) { for (i = 0; i < validArgs.length; i++) { arg = validArgs[i]; argIsObserve = typeof arg !== 'undefined' && isObserve(arg); // Array-like objects are falsey if their length = 0. if (isArrayLike(arg)) { // Use .attr to trigger binding on empty lists returned from function if (mode == '#') { valid = valid && !! (argIsObserve ? arg.attr('length') : arg.length); } else if (mode == '^') { valid = valid && !(argIsObserve ? arg.attr('length') : arg.length); } } // Otherwise just check if it is truthy or not. else { valid = mode == '#' ? valid && !! arg : mode == '^' ? valid && !arg : valid; } } } // Otherwise interpolate like normal. if (valid) { switch (mode) { // Truthy section. case '#': // Iterate over arrays if (isArrayLike(name)) { var isObserveList = isObserve(name); // Add the reference to the list in the contexts. for (i = 0; i < name.length; i++) { result.push(options.fn.call(name[i] || {}, context) || ''); // Ensure that live update works on observable lists isObserveList && name.attr('' + i); } return result.join(''); } // Normal case. else { return options.fn.call(name || {}, context) || ''; } break; // Falsey section. 
case '^': return options.inverse.call(name || {}, context) || ''; break; default: // Add + '' to convert things like numbers to strings. // This can cause issues if you are trying to // eval on the length but this is the more // common case. return '' + (name !== undefined ? name : ''); break; } } return ''; }; Mustache.get = function (ref, contexts, isHelper, isArgument) { var options = contexts.options || {}; contexts = contexts.context || contexts; // Split the reference (like `a.b.c`) into an array of key names. var names = ref.split('.'), namesLength = names.length, // Assume the local object is the last context in the stack. obj = contexts[contexts.length - 1], // Assume the parent context is the second to last context in the stack. context = contexts[contexts.length - 2], lastValue, value, name, i, j, // if we walk up and don't find a property, we default // to listening on an undefined property of the first // context that is an observe defaultObserve, defaultObserveName; // Handle `this` references for list iteration: {{.}} or {{this}} if (/^\.|this$/.test(ref)) { // If context isn't an object, then it was a value passed by a helper so use it as an override. if (!/^object|undefined$/.test(typeof context)) { return context || ''; } // Otherwise just return the closest object. else { while (value = contexts.pop()) { if (typeof value !== 'undefined') { return value; } } return ''; } } // Handle object resolution (like `a.b.c`). else if (!isHelper) { // Reverse iterate through the contexts (last in, first out). for (i = contexts.length - 1; i >= 0; i--) { // Check the context for the reference value = contexts[i]; // Make sure the context isn't a failed object before diving into it. if (value !== undefined) { for (j = 0; j < namesLength; j++) { // Keep running up the tree while there are matches. if (typeof value[names[j]] != 'undefined') { lastValue = value; value = value[name = names[j]]; } // If it's undefined, still match if the parent is an Observe. else if (isObserve(value)) { defaultObserve = value; defaultObserveName = names[j]; lastValue = value = undefined; break; } else { lastValue = value = undefined; break; } } } // Found a matched reference. if (value !== undefined) { if (can.isFunction(lastValue[name]) && isArgument) { // Don't execute functions if they are parameters for a helper and are not a can.compute // Need to bind it to the original context so that that information doesn't get lost by the helper return function () { return lastValue[name].apply(lastValue, arguments); }; } else if (can.isFunction(lastValue[name])) { // Support functions stored in objects. return lastValue[name](); } // Invoke the length to ensure that Observe.List events fire. else if (isObserve(value) && isArrayLike(value) && value.attr('length')) { return value; } // Add support for observes else if (isObserve(lastValue)) { return lastValue.compute(name); } else { return value; } } } } if (defaultObserve && // if there's not a helper by this name and no attribute with this name !(Mustache.getHelper(ref) && can.inArray(defaultObserveName, can.Observe.keys(defaultObserve)) === -1)) { return defaultObserve.compute(defaultObserveName); } // Support helper-like functions as anonymous helpers if (obj !== undefined && can.isFunction(obj[ref])) { return obj[ref]; } // Support helpers without arguments, but only if there wasn't a matching data reference. 
else if (value = Mustache.getHelper(ref, options)) { return ref; } return ''; }; // ## Helpers // Helpers are functions that can be called from within a template. // These helpers differ from the scanner helpers in that they execute // at runtime instead of during compilation. // Custom helpers can be added via `can.Mustache.registerHelper`, // but there are also some built-in helpers included by default. // Most of the built-in helpers are little more than aliases to actions // that the base version of Mustache simply implies based on the // passed in object. // Built-in helpers: // * `data` - `data` is a special helper that is implemented via scanning helpers. // It hooks up the active element to the active data object: `<div {{data "key"}} />` // * `if` - Renders a truthy section: `{{#if var}} render {{/if}}` // * `unless` - Renders a falsey section: `{{#unless var}} render {{/unless}}` // * `each` - Renders an array: `{{#each array}} render {{this}} {{/each}}` // * `with` - Opens a context section: `{{#with var}} render {{/with}}` Mustache._helpers = {}; Mustache.registerHelper = function (name, fn) { this._helpers[name] = { name: name, fn: fn }; }; Mustache.getHelper = function (name, options) { return options && options.helpers && options.helpers[name] && { fn: options.helpers[name] } || this._helpers[name] for (var i = 0, helper; helper = [i]; i++) { // Find the correct helper if (helper.name == name) { return helper; } } return null; }; Mustache.render = function (partial, context) { // Make sure the partial being passed in // isn't a variable like { partial: "foo.mustache" } if (!can.view.cached[partial] && context[partial]) { partial = context[partial]; } // Call into `can.view.render` passing the // partial and context. return can.view.render(partial, context); }; Mustache.renderPartial = function (partial, context, options) { return partial.render ? partial.render(context, options) : partial(context, options); }; // The built-in Mustache helpers. can.each({ // Implements the `if` built-in helper. 'if': function (expr, options) { if ( !! expr) { return options.fn(this); } else { return options.inverse(this); } }, // Implements the `unless` built-in helper. 'unless': function (expr, options) { if (!expr) { return options.fn(this); } }, // Implements the `each` built-in helper. 'each': function (expr, options) { if ( !! expr && expr.length) { var result = []; for (var i = 0; i < expr.length; i++) { result.push(options.fn(expr[i])); } return result.join(''); } }, // Implements the `with` built-in helper. 'with': function (expr, options) { if ( !! expr) { return options.fn(expr); } } }, function (fn, name) { Mustache.registerHelper(name, fn); }); // ## Registration // Registers Mustache with can.view. can.view.register({ suffix: "mustache", contentType: "x-mustache-template", // Returns a `function` that renders the view. 
script: function (id, src) { return "can.Mustache(function(_CONTEXT,_VIEW) { " + new Mustache({ text: src, name: id }).template.out + " })"; }, renderer: function (id, text) { return Mustache({ text: text, name: id }); } }); return can; })(module["can/util/jquery/jquery.js"], module["can/view/view.js"], module["can/view/scanner.js"], module["can/observe/compute/compute.js"], module["can/view/render.js"]); // ## can/view/modifiers/modifiers.js module['can/view/modifiers/modifiers.js'] = (function ($, can) { //---- ADD jQUERY HELPERS ----- //converts jquery functions to use views var convert, modify, isTemplate, isHTML, isDOM, getCallback, // text and val cannot produce an element, so don't run hookups on them noHookup = { 'val': true, 'text': true }; convert = function (func_name) { // save the old jQuery helper var old = $.fn[func_name]; // replace it with our new helper $.fn[func_name] = function () { var args = can.makeArray(arguments), callbackNum, callback, self = this, result; // if the first arg is a deferred // wait until it finishes, and call // modify with the result if (can.isDeferred(args[0])) { args[0].done(function (res) { modify.call(self, [res], old); }) return this; } //check if a template else if (isTemplate(args)) { // if we should operate async if ((callbackNum = getCallback(args))) { callback = args[callbackNum]; args[callbackNum] = function (result) { modify.call(self, [result], old); callback.call(self, result); }; can.view.apply(can.view, args); return this; } // call view with args (there might be deferreds) result = can.view.apply(can.view, args); // if we got a string back if (!can.isDeferred(result)) { // we are going to call the old method with that string args = [result]; } else { // if there is a deferred, wait until it is done before calling modify result.done(function (res) { modify.call(self, [res], old); }) return this; } } return noHookup[func_name] ? 
old.apply(this, args) : modify.call(this, args, old); }; }; // modifies the content of the element // but also will run any hookup modify = function (args, old) { var res, stub, hooks; //check if there are new hookups for (var hasHookups in can.view.hookups) { break; } //if there are hookups, turn into a frag // and insert that // by using a frag, the element can be recursively hooked up // before insterion if (hasHookups && args[0] && isHTML(args[0])) { args[0] = can.view.frag(args[0]).childNodes; } //then insert into DOM res = old.apply(this, args); return res; }; // returns true or false if the args indicate a template is being used // $('#foo').html('/path/to/template.ejs',{data}) // in general, we want to make sure the first arg is a string // and the second arg is data isTemplate = function (args) { // save the second arg type var secArgType = typeof args[1]; // the first arg is a string return typeof args[0] == "string" && // the second arg is an object or function (secArgType == 'object' || secArgType == 'function') && // but it is not a dom element !isDOM(args[1]); }; // returns true if the arg is a jQuery object or HTMLElement isDOM = function (arg) { return arg.nodeType || (arg[0] && arg[0].nodeType) }; // returns whether the argument is some sort of HTML data isHTML = function (arg) { if (isDOM(arg)) { // if jQuery object or DOM node we're good return true; } else if (typeof arg === "string") { // if string, do a quick sanity check that we're HTML arg = can.trim(arg); return arg.substr(0, 1) === "<" && arg.substr(arg.length - 1, 1) === ">" && arg.length >= 3; } else { // don't know what you are return false; } }; //returns the callback arg number if there is one (for async view use) getCallback = function (args) { return typeof args[3] === 'function' ? 3 : typeof args[2] === 'function' && 2; }; $.fn.hookup = function () { can.view.frag(this); return this; }; can.each([ "prepend", "append", "after", "before", "text", "html", "replaceWith", "val"], function (func) { convert(func); }); return can; })(module["jquery/jquery.js"], module["can/view/view.js"]); // ## can/observe/observe.js module['can/observe/observe.js'] = (function (can) { // ## observe.js // `can.Observe` // _Provides the observable pattern for JavaScript Objects._ // Returns `true` if something is an object with properties of its own. var canMakeObserve = function (obj) { return obj && (can.isArray(obj) || can.isPlainObject(obj) || (obj instanceof can.Observe)); }, // Removes all listeners. unhookup = function (items, namespace) { return can.each(items, function (item) { if (item && item.unbind) { item.unbind("change" + namespace); } }); }, // Listens to changes on `val` and "bubbles" the event up. // `val` - The object to listen for changes on. // `prop` - The property name is at on. // `parent` - The parent object of prop. // `ob` - (optional) The Observe object constructor // `list` - (optional) The observable list constructor hookupBubble = function (val, prop, parent, Ob, List) { Ob = Ob || Observe; List = List || Observe.List; // If it's an `array` make a list, otherwise a val. if (val instanceof Observe) { // We have an `observe` already... // Make sure it is not listening to this already unhookup([val], parent._cid); } else if (can.isArray(val)) { val = new List(val); } else { val = new Ob(val); } // Listen to all changes and `batchTrigger` upwards. val.bind("change" + parent._cid, function () { // `batchTrigger` the type on this... 
var args = can.makeArray(arguments), ev = args.shift(); args[0] = (prop === "*" ? [parent.indexOf(val), args[0]] : [prop, args[0]]).join("."); // track objects dispatched on this observe ev.triggeredNS = ev.triggeredNS || {}; // if it has already been dispatched exit if (ev.triggeredNS[parent._cid]) { return; } ev.triggeredNS[parent._cid] = true; // send change event with modified attr to parent can.trigger(parent, ev, args); // send modified attr event to parent //can.trigger(parent, args[0], args); }); return val; }, // An `id` to track events for a given observe. observeId = 0, // A helper used to serialize an `Observe` or `Observe.List`. // `observe` - The observable. // `how` - To serialize with `attr` or `serialize`. // `where` - To put properties, in an `{}` or `[]`. serialize = function (observe, how, where) { // Go through each property. observe.each(function (val, name) { // If the value is an `object`, and has an `attrs` or `serialize` function. where[name] = canMakeObserve(val) && can.isFunction(val[how]) ? // Call `attrs` or `serialize` to get the original data back. val[how]() : // Otherwise return the value. val; }); return where; }, $method = function (name) { return function () { return can[name].apply(this, arguments); }; }, bind = $method('addEvent'), unbind = $method('removeEvent'), attrParts = function (attr) { return can.isArray(attr) ? attr : ("" + attr).split("."); }, // Which batch of events this is for -- might not want to send multiple // messages on the same batch. This is mostly for event delegation. batchNum = 1, // how many times has start been called without a stop transactions = 0, // an array of events within a transaction batchEvents = [], stopCallbacks = []; var Observe = can.Observe = can.Construct({ // keep so it can be overwritten bind: bind, unbind: unbind, id: "id", canMakeObserve: canMakeObserve, // starts collecting events // takes a callback for after they are updated // how could you hook into after ejs startBatch: function (batchStopHandler) { transactions++; batchStopHandler && stopCallbacks.push(batchStopHandler); }, stopBatch: function (force, callStart) { if (force) { transactions = 0; } else { transactions--; } if (transactions == 0) { var items = batchEvents.slice(0), callbacks = stopCallbacks.slice(0); batchEvents = []; stopCallbacks = []; batchNum++; callStart && this.startBatch(); can.each(items, function (args) { can.trigger.apply(can, args); }); can.each(callbacks, function (cb) { cb; }); } }, triggerBatch: function (item, event, args) { // Don't send events if initalizing. if (!item._init) { if (transactions == 0) { return can.trigger(item, event, args); } else { batchEvents.push([ item, { type: event, batchNum: batchNum }, args]); } } }, keys: function (observe) { var keys = []; Observe.__reading && Observe.__reading(observe, '__keys'); for (var keyName in observe._data) { keys.push(keyName); } return keys; } }, { setup: function (obj) { // `_data` is where we keep the properties. this._data = {}; // The namespace this `object` uses to listen to events. can.cid(this, ".observe"); // Sets all `attrs`. 
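// (Editor's note, illustrative only - typical use of the Observe defined here;
// the property names are hypothetical:)
//     var paginate = new can.Observe({ offset: 0, limit: 20 });
//     paginate.attr("offset");            // -> 0
//     paginate.bind("change", function (ev, attr, how, newVal, oldVal) {
//         // attr === "offset", how === "set", newVal === 20, oldVal === 0
//     });
//     paginate.attr("offset", 20);        // triggers the handler above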
this._init = 1; this.attr(obj); this.bind('change' + this._cid, can.proxy(this._changes, this)); delete this._init; }, _changes: function (ev, attr, how, newVal, oldVal) { Observe.triggerBatch(this, { type: attr, batchNum: ev.batchNum }, [newVal, oldVal]); }, _triggerChange: function (attr, how, newVal, oldVal) { Observe.triggerBatch(this, "change", can.makeArray(arguments)) }, attr: function (attr, val) { // This is super obfuscated for space -- basically, we're checking // if the type of the attribute is not a `number` or a `string`. var type = typeof attr; if (type !== "string" && type !== "number") { return this._attrs(attr, val) } else if (val === undefined) { // If we are getting a value. // Let people know we are reading. Observe.__reading && Observe.__reading(this, attr) return this._get(attr) } else { // Otherwise we are setting. this._set(attr, val); return this; } }, each: function () { Observe.__reading && Observe.__reading(this, '__keys'); return can.each.apply(undefined, [this.__get()].concat(can.makeArray(arguments))) }, removeAttr: function (attr) { // Convert the `attr` into parts (if nested). var parts = attrParts(attr), // The actual property to remove. prop = parts.shift(), // The current value. current = this._data[prop]; // If we have more parts, call `removeAttr` on that part. if (parts.length) { return current.removeAttr(parts) } else { if (prop in this._data) { // Otherwise, `delete`. delete this._data[prop]; // Create the event. if (!(prop in this.constructor.prototype)) { delete this[prop] } // Let others know the number of keys have changed Observe.triggerBatch(this, "__keys"); this._triggerChange(prop, "remove", undefined, current); } return current; } }, // Reads a property from the `object`. _get: function (attr) { // break up the attr (`"foo.bar"`) into `["foo","bar"]` var parts = attrParts(attr), // get the value of the first attr name (`"foo"`) current = this.__get(parts.shift()); // if there are other attributes to read return parts.length ? // and current has a value current ? // lookup the remaining attrs on current current._get(parts) : // or if there's no current, return undefined undefined : // if there are no more parts, return current current; }, // Reads a property directly if an `attr` is provided, otherwise // returns the "real" data object itself. __get: function (attr) { return attr ? this._data[attr] : this._data; }, // Sets `attr` prop as value on this object where. // `attr` - Is a string of properties or an array of property values. // `value` - The raw value to set. _set: function (attr, value) { // Convert `attr` to attr parts (if it isn't already). var parts = attrParts(attr), // The immediate prop we are setting. prop = parts.shift(), // The current value. current = this.__get(prop); // If we have an `object` and remaining parts. if (canMakeObserve(current) && parts.length) { // That `object` should set it (this might need to call attr). current._set(parts, value) } else if (!parts.length) { // We're in "real" set territory. if (this.__convert) { value = this.__convert(prop, value) } this.__set(prop, value, current) } else { throw "can.Observe: Object does not exist" } }, __set: function (prop, value, current) { // Otherwise, we are setting it on this `object`. // TODO: Check if value is object and transform // are we changing the value. if (value !== current) { // Check if we are adding this for the first time -- // if we are, we need to create an `add` event. var changeType = this.__get().hasOwnProperty(prop) ? 
"set" : "add"; // Set the value on data. this.___set(prop, // If we are getting an object. canMakeObserve(value) ? // Hook it up to send event. hookupBubble(value, prop, this) : // Value is normal. value); if (changeType == "add") { // If there is no current value, let others know that // the the number of keys have changed Observe.triggerBatch(this, "__keys", undefined); } // `batchTrigger` the change event. this._triggerChange(prop, changeType, value, current); //Observe.triggerBatch(this, prop, [value, current]); // If we can stop listening to our old value, do it. current && unhookup([current], this._cid); } }, // Directly sets a property on this `object`. ___set: function (prop, val) { this._data[prop] = val; // Add property directly for easy writing. // Check if its on the `prototype` so we don't overwrite methods like `attrs`. if (!(prop in this.constructor.prototype)) { this[prop] = val } }, bind: bind, unbind: unbind, serialize: function () { return serialize(this, 'serialize', {}); }, _attrs: function (props, remove) { if (props === undefined) { return serialize(this, 'attr', {}) } props = can.extend({}, props); var prop, self = this, newVal; Observe.startBatch(); this.each(function (curVal, prop) { newVal = props[prop]; // If we are merging... if (newVal === undefined) { remove && self.removeAttr(prop); return; } if (self.__convert) { newVal = self.__convert(prop, newVal) } // if we're dealing with models, want to call _set to let converter run if (newVal instanceof can.Observe) { self.__set(prop, newVal, curVal) // if its an object, let attr merge } else if (canMakeObserve(curVal) && canMakeObserve(newVal) && curVal.attr) { curVal.attr(newVal, remove) // otherwise just set } else if (curVal != newVal) { self.__set(prop, newVal, curVal) } delete props[prop]; }) // Add remaining props. for (var prop in props) { newVal = props[prop]; this._set(prop, newVal) } Observe.stopBatch() return this; }, compute: function (prop) { var self = this, computer = function (val) { return self.attr(prop, val); }; return can.compute ? can.compute(computer) : computer; } }); // Helpers for `observable` lists. var splice = [].splice, list = Observe( { setup: function (instances, options) { this.length = 0; can.cid(this, ".observe") this._init = 1; this.push.apply(this, can.makeArray(instances || [])); this.bind('change' + this._cid, can.proxy(this._changes, this)); can.extend(this, options); delete this._init; }, _triggerChange: function (attr, how, newVal, oldVal) { Observe.prototype._triggerChange.apply(this, arguments) // `batchTrigger` direct add and remove events... if (!~attr.indexOf('.')) { if (how === 'add') { Observe.triggerBatch(this, how, [newVal, +attr]); Observe.triggerBatch(this, 'length', [this.length]); } else if (how === 'remove') { Observe.triggerBatch(this, how, [oldVal, +attr]); Observe.triggerBatch(this, 'length', [this.length]); } else { Observe.triggerBatch(this, how, [newVal, +attr]) } } }, __get: function (attr) { return attr ? this[attr] : this; }, ___set: function (attr, val) { this[attr] = val; if (+attr >= this.length) { this.length = (+attr + 1) } }, // Returns the serialized form of this list. 
serialize: function () { return serialize(this, 'serialize', []); }, splice: function (index, howMany) { var args = can.makeArray(arguments), i; for (i = 2; i < args.length; i++) { var val = args[i]; if (canMakeObserve(val)) { args[i] = hookupBubble(val, "*", this) } } if (howMany === undefined) { howMany = args[1] = this.length - index; } var removed = splice.apply(this, args); can.Observe.startBatch() if (howMany > 0) { this._triggerChange("" + index, "remove", undefined, removed); unhookup(removed, this._cid); } if (args.length > 2) { this._triggerChange("" + index, "add", args.slice(2), removed); } can.Observe.stopBatch(); return removed; }, _attrs: function (items, remove) { if (items === undefined) { return serialize(this, 'attr', []); } // Create a copy. items = can.makeArray(items); Observe.startBatch(); this._updateAttrs(items, remove); Observe.stopBatch() }, _updateAttrs: function (items, remove) { var len = Math.min(items.length, this.length); for (var prop = 0; prop < len; prop++) { var curVal = this[prop], newVal = items[prop]; if (canMakeObserve(curVal) && canMakeObserve(newVal)) { curVal.attr(newVal, remove) } else if (curVal != newVal) { this._set(prop, newVal) } else { } } if (items.length > this.length) { // Add in the remaining props. this.push.apply(this, items.slice(this.length)); } else if (items.length < this.length && remove) { this.splice(items.length) } } }), // Converts to an `array` of arguments. getArgs = function (args) { return args[0] && can.isArray(args[0]) ? args[0] : can.makeArray(args); }; // Create `push`, `pop`, `shift`, and `unshift` can.each({ push: "length", unshift: 0 }, // Adds a method // `name` - The method name. // `where` - Where items in the `array` should be added. function (where, name) { var orig = [][name] list.prototype[name] = function () { // Get the items being added. var args = [], // Where we are going to add items. len = where ? this.length : 0, i = arguments.length, res, val, constructor = this.constructor; // Go through and convert anything to an `observe` that needs to be converted. while (i--) { val = arguments[i]; args[i] = canMakeObserve(val) ? hookupBubble(val, "*", this, this.constructor.Observe, this.constructor) : val; } // Call the original method. res = orig.apply(this, args); if (!this.comparator || !args.length) { this._triggerChange("" + len, "add", args, undefined); } return res; } }); can.each({ pop: "length", shift: 0 }, // Creates a `remove` type method function (where, name) { list.prototype[name] = function () { var args = getArgs(arguments), len = where && this.length ? this.length - 1 : 0; var res = [][name].apply(this, args) // Create a change where the args are // `*` - Change on potentially multiple properties. // `remove` - Items removed. // `undefined` - The new values (there are none). // `res` - The old, removed values (should these be unbound). // `len` - Where these items were removed. this._triggerChange("" + len, "remove", undefined, [res]) if (res && res.unbind) { res.unbind("change" + this._cid) } return res; } }); can.extend(list.prototype, { indexOf: function (item) { this.attr('length') return can.inArray(item, this) }, join: [].join, slice: function () { var temp = Array.prototype.slice.apply(this, arguments); return new this.constructor(temp); }, concat: function () { var args = []; can.each(can.makeArray(arguments), function (arg, i) { args[i] = arg instanceof can.Observe.List ? 
arg.serialize() : arg; }); return new this.constructor(Array.prototype.concat.apply(this.serialize(), args)); }, forEach: function (cb, thisarg) { can.each(this, cb, thisarg || this); }, replace: function (newList) { if (can.isDeferred(newList)) { newList.then(can.proxy(this.replace, this)); } else { this.splice.apply(this, [0, this.length].concat(can.makeArray(newList || []))); } return this; } }); Observe.List = list; Observe.setup = function () { can.Construct.setup.apply(this, arguments); // I would prefer not to do it this way. It should // be using the attributes plugin to do this type of conversion. this.List = Observe.List({ Observe: this }, {}); } return Observe; })(module["can/util/jquery/jquery.js"], module["can/construct/construct.js"]); // ## can/model/model.js module['can/model/model.js'] = (function (can) { // ## model.js // `can.Model` // _A `can.Observe` that connects to a RESTful interface._ // Generic deferred piping function var pipe = function (def, model, func) { var d = new can.Deferred(); def.then(function () { var args = can.makeArray(arguments); args[0] = model[func](args[0]); d.resolveWith(d, args); }, function () { d.rejectWith(this, arguments); }); if (typeof def.abort === 'function') { d.abort = function () { return def.abort(); } } return d; }, modelNum = 0, ignoreHookup = /change.observe\d+/, getId = function (inst) { // Instead of using attr, use __get for performance. // Need to set reading can.Observe.__reading && can.Observe.__reading(inst, inst.constructor.id) return inst.__get(inst.constructor.id); }, // Ajax `options` generator function ajax = function (ajaxOb, data, type, dataType, success, error) { var params = {}; // If we get a string, handle it. if (typeof ajaxOb == "string") { // If there's a space, it's probably the type. var parts = ajaxOb.split(/\s/); params.url = parts.pop(); if (parts.length) { params.type = parts.pop(); } } else { can.extend(params, ajaxOb); } // If we are a non-array object, copy to a new attrs. params.data = typeof data == "object" && !can.isArray(data) ? can.extend(params.data || {}, data) : data; // Get the url with any templated values filled out. params.url = can.sub(params.url, params.data, true); return can.ajax(can.extend({ type: type || "post", dataType: dataType || "json", success: success, error: error }, params)); }, makeRequest = function (self, type, success, error, method) { var deferred, args = [self.serialize()], // The model. model = self.constructor, jqXHR; // `destroy` does not need data. if (type == 'destroy') { args.shift(); } // `update` and `destroy` need the `id`. if (type !== 'create') { args.unshift(getId(self)); } jqXHR = model[type].apply(model, args); deferred = jqXHR.pipe(function (data) { self[method || type + "d"](data, jqXHR); return self; }); // Hook up `abort` if (jqXHR.abort) { deferred.abort = function () { jqXHR.abort(); }; } deferred.then(success, error); return deferred; }, // This object describes how to make an ajax request for each ajax method. // The available properties are: // `url` - The default url to use as indicated as a property on the model. // `type` - The default http request type // `data` - A method that takes the `arguments` and returns `data` used for ajax. 
ajaxMethods = { create: { url: "_shortName", type: "post" }, update: { data: function (id, attrs) { attrs = attrs || {}; var identity = this.id; if (attrs[identity] && attrs[identity] !== id) { attrs["new" + can.capitalize(id)] = attrs[identity]; delete attrs[identity]; } attrs[identity] = id; return attrs; }, type: "put" }, destroy: { type: "delete", data: function (id) { var args = {}; args.id = args[this.id] = id; return args; } }, findAll: { url: "_shortName" }, findOne: {} }, // Makes an ajax request `function` from a string. // `ajaxMethod` - The `ajaxMethod` object defined above. // `str` - The string the user provided. Ex: `findAll: "/recipes.json"`. ajaxMaker = function (ajaxMethod, str) { // Return a `function` that serves as the ajax method. return function (data) { // If the ajax method has it's own way of getting `data`, use that. data = ajaxMethod.data ? ajaxMethod.data.apply(this, arguments) : // Otherwise use the data passed in. data; // Return the ajax method with `data` and the `type` provided. return ajax(str || this[ajaxMethod.url || "_url"], data, ajaxMethod.type || "get") } } can.Model = can.Observe({ fullName: "can.Model", setup: function (base) { // create store here if someone wants to use model without inheriting from it this.store = {}; can.Observe.setup.apply(this, arguments); // Set default list as model list if (!can.Model) { return; } this.List = ML({ Observe: this }, {}); var self = this, clean = can.proxy(this._clean, self); // go through ajax methods and set them up can.each(ajaxMethods, function (method, name) { // if an ajax method is not a function, it's either // a string url like findAll: "/recipes" or an // ajax options object like {url: "/recipes"} if (!can.isFunction(self[name])) { // use ajaxMaker to convert that into a function // that returns a deferred with the data self[name] = ajaxMaker(method, self[name]); } // check if there's a make function like makeFindAll // these take deferred function and can do special // behavior with it (like look up data in a store) if (self["make" + can.capitalize(name)]) { // pass the deferred method to the make method to get back // the "findAll" method. var newMethod = self["make" + can.capitalize(name)](self[name]); can.Construct._overwrite(self, base, name, function () { // increment the numer of requests this._reqs++; var def = newMethod.apply(this, arguments); var then = def.then(clean, clean); then.abort = def.abort; // attach abort to our then and return it return then; }) } }); if (self.fullName == "can.Model" || !self.fullName) { self.fullName = "Model" + (++modelNum); } // Add ajax converters. this._reqs = 0; this._url = this._shortName + "/{" + this.id + "}" }, _ajax: ajaxMaker, _clean: function () { this._reqs--; if (!this._reqs) { for (var id in this.store) { if (!this.store[id]._bindings) { delete this.store[id]; } } } return arguments[0]; }, models: function (instancesRawData, oldList) { if (!instancesRawData) { return; } if (instancesRawData instanceof this.List) { return instancesRawData; } // Get the list type. var self = this, tmp = [], res = oldList instanceof can.Observe.List ? oldList : new(self.List || ML), // Did we get an `array`? arr = can.isArray(instancesRawData), // Did we get a model list? ml = (instancesRawData instanceof ML), // Get the raw `array` of objects. raw = arr ? // If an `array`, return the `array`. instancesRawData : // Otherwise if a model list. (ml ? // Get the raw objects from the list. instancesRawData.serialize() : // Get the object's data. 
instancesRawData.data), i = 0; if (res.length) { res.splice(0); } can.each(raw, function (rawPart) { tmp.push(self.model(rawPart)); }); // We only want one change event so push everything at once res.push.apply(res, tmp); if (!arr) { // Push other stuff onto `array`. can.each(instancesRawData, function (val, prop) { if (prop !== 'data') { res.attr(prop, val); } }) } return res; }, model: function (attributes) { if (!attributes) { return; } if (attributes instanceof this) { attributes = attributes.serialize(); } var id = attributes[this.id], model = (id || id === 0) && this.store[id] ? this.store[id].attr(attributes, this.removeAttr || false) : new this(attributes); if (this._reqs) { this.store[attributes[this.id]] = model; } return model; } }, { isNew: function () { var id = getId(this); return !(id || id === 0); // If `null` or `undefined` }, save: function (success, error) { return makeRequest(this, this.isNew() ? 'create' : 'update', success, error); }, destroy: function (success, error) { if (this.isNew()) { var self = this; return can.Deferred().done(function (data) { self.destroyed(data) }).resolve(self); } return makeRequest(this, 'destroy', success, error, 'destroyed'); }, bind: function (eventName) { if (!ignoreHookup.test(eventName)) { if (!this._bindings) { this.constructor.store[this.__get(this.constructor.id)] = this; this._bindings = 0; } this._bindings++; } return can.Observe.prototype.bind.apply(this, arguments); }, unbind: function (eventName) { if (!ignoreHookup.test(eventName)) { this._bindings--; if (!this._bindings) { delete this.constructor.store[getId(this)]; } } return can.Observe.prototype.unbind.apply(this, arguments); }, // Change `id`. ___set: function (prop, val) { can.Observe.prototype.___set.call(this, prop, val) // If we add an `id`, move it to the store. if (prop === this.constructor.id && this._bindings) { this.constructor.store[getId(this)] = this; } } }); can.each({ makeFindAll: "models", makeFindOne: "model" }, function (method, name) { can.Model[name] = function (oldFind) { return function (params, success, error) { var def = pipe(oldFind.call(this, params), this, method); def.then(success, error); // return the original promise return def; }; }; }); can.each([ "created", "updated", "destroyed"], function (funcName) { can.Model.prototype[funcName] = function (attrs) { var stub, constructor = this.constructor; // Update attributes if attributes have been passed stub = attrs && typeof attrs == 'object' && this.attr(attrs.attr ? attrs.attr() : attrs); // Call event on the instance can.trigger(this, funcName); can.trigger(this, "change", funcName) // Call event on the instance's Class can.trigger(constructor, funcName, this); }; }); // Model lists are just like `Observe.List` except that when their items are // destroyed, it automatically gets removed from the list. var ML = can.Model.List = can.Observe.List({ setup: function () { can.Observe.List.prototype.setup.apply(this, arguments); // Send destroy events. var self = this; this.bind('change', function (ev, how) { if (/\w+\.destroyed/.test(how)) { var index = self.indexOf(ev.target); if (index != -1) { self.splice(index, 1); } } }) } }) return can.Model; })(module["can/util/jquery/jquery.js"], module["can/observe/observe.js"]); // ## can/view/ejs/ejs.js module['can/view/ejs/ejs.js'] = (function (can) { // ## ejs.js // `can.EJS` // _Embedded JavaScript Templates._ // Helper methods. 
var extend = can.extend, EJS = function (options) { // Supports calling EJS without the constructor // This returns a function that renders the template. if (this.constructor != EJS) { var ejs = new EJS(options); return function (data, helpers) { return ejs.render(data, helpers); }; } // If we get a `function` directly, it probably is coming from // a `steal`-packaged view. if (typeof options == "function") { this.template = { fn: options }; return; } // Set options on self. extend(this, options); this.template = this.scanner.scan(this.text, this.name); }; can.EJS = EJS; EJS.prototype. render = function (object, extraHelpers) { object = object || {}; return this.template.fn.call(object, object, new EJS.Helpers(object, extraHelpers || {})); }; extend(EJS.prototype, { scanner: new can.view.Scanner({ tokens: [ ["templateLeft", "<%%"], // Template ["templateRight", "%>"], // Right Template ["returnLeft", "<%=="], // Return Unescaped ["escapeLeft", "<%="], // Return Escaped ["commentLeft", "<%#"], // Comment ["left", "<%"], // Run --- this is hack for now ["right", "%>"], // Right -> All have same FOR Mustache ... ["returnRight", "%>"] ] }) }); EJS.Helpers = function (data, extras) { this._data = data; this._extras = extras; extend(this, extras); }; EJS.Helpers.prototype = { // TODO Deprecated!! list: function (list, cb) { can.each(list, function (item, i) { cb(item, i, list) }) } }; // Options for `steal`'s build. can.view.register({ suffix: "ejs", // returns a `function` that renders the view. script: function (id, src) { return "can.EJS(function(_CONTEXT,_VIEW) { " + new EJS({ text: src, name: id }).template.out + " })"; }, renderer: function (id, text) { return EJS({ text: text, name: id }); } }); return can; })(module["can/util/jquery/jquery.js"], module["can/view/view.js"], module["can/util/string/string.js"], module["can/observe/compute/compute.js"], module["can/view/scanner.js"], module["can/view/render.js"]); // ## can/observe/attributes/attributes.js module['can/observe/attributes/attributes.js'] = (function (can, Observe) { can.each([can.Observe, can.Model], function (clss) { // in some cases model might not be defined quite yet. if (clss === undefined) { return; } can.extend(clss, { attributes: {}, convert: { "date": function (str) { var type = typeof str; if (type === "string") { return isNaN(Date.parse(str)) ? null : Date.parse(str) } else if (type === 'number') { return new Date(str) } else { return str } }, "number": function (val) { return parseFloat(val); }, "boolean": function (val) { if (val === 'false' || val === '0' || !val) { return false; } return true; }, "default": function (val, oldVal, error, type) { var construct = can.getObject(type), context = window, realType; // if type has a . we need to look it up if (type.indexOf(".") >= 0) { // get everything before the last . realType = type.substring(0, type.lastIndexOf(".")); // get the object before the last . context = can.getObject(realType); } return typeof construct == "function" ? construct.call(context, val, oldVal) : val; } }, serialize: { "default": function (val, type) { return isObject(val) && val.serialize ? 
val.serialize() : val; }, "date": function (val) { return val && val.getTime() } } }); // overwrite setup to do this stuff var oldSetup = clss.setup; clss.setup = function (superClass, stat, proto) { var self = this; oldSetup.call(self, superClass, stat, proto); can.each(["attributes"], function (name) { if (!self[name] || superClass[name] === self[name]) { self[name] = {}; } }); can.each(["convert", "serialize"], function (name) { if (superClass[name] != self[name]) { self[name] = can.extend({}, superClass[name], self[name]); } }); }; }); var oldSetup = can.Observe.prototype.setup; can.Observe.prototype.setup = function (obj) { var diff = {}; oldSetup.call(this, obj); can.each(this.constructor.defaults, function (value, key) { if (!this.hasOwnProperty(key)) { diff[key] = value; } }, this); this._init = 1; this.attr(diff); delete this._init; }; can.Observe.prototype.__convert = function (prop, value) { // check if there is a var Class = this.constructor, oldVal = this.attr(prop), type, converter; if (Class.attributes) { // the type of the attribute type = Class.attributes[prop]; converter = Class.convert[type] || Class.convert['default']; } return value === null || !type ? // just use the value value : // otherwise, pass to the converter converter.call(Class, value, oldVal, function () {}, type); }; can.Observe.prototype.serialize = function (attrName) { var where = {}, Class = this.constructor, attrs = {}; if (attrName != undefined) { attrs[attrName] = this[attrName]; } else { attrs = this.__get(); } can.each(attrs, function (val, name) { var type, converter; type = Class.attributes ? Class.attributes[name] : 0; converter = Class.serialize ? Class.serialize[type] : 0; // if the value is an object, and has a attrs or serialize function where[name] = val && typeof val.serialize == 'function' ? // call attrs or serialize to get the original data back val.serialize() : // otherwise if we have a converter converter ? // use the converter converter(val, type) : // or return the val val }); return attrName != undefined ? where[attrName] : where; }; return can.Observe; })(module["can/util/jquery/jquery.js"], module["can/observe/observe.js"]); // ## can/observe/delegate/delegate.js module['can/observe/delegate/delegate.js'] = (function (can) { // ** - 'this' will be the deepest item changed // * - 'this' will be any changes within *, but * will be the // this returned // tells if the parts part of a delegate matches the broken up props of the event // gives the prop to use as 'this' // - parts - the attribute name of the delegate split in parts ['foo','*'] // - props - the split props of the event that happened ['foo','bar','0'] // - returns - the attribute to delegate too ('foo.bar'), or null if not a match var matches = function (parts, props) { //check props parts are the same or var len = parts.length, i = 0, // keeps the matched props we will use matchedProps = [], prop; // if the event matches for (i; i < len; i++) { prop = props[i] // if no more props (but we should be matching them) // return null if (typeof prop !== 'string') { return null; } else // if we have a "**", match everything if (parts[i] == "**") { return props.join("."); } else // a match, but we want to delegate to "*" if (parts[i] == "*") { // only do this if there is nothing after ... 
matchedProps.push(prop); } else if (prop === parts[i]) { matchedProps.push(prop); } else { return null; } } return matchedProps.join("."); }, // gets a change event and tries to figure out which // delegates to call delegate = function (event, prop, how, newVal, oldVal) { // pre-split properties to save some regexp time var props = prop.split("."), delegates = (this._observe_delegates || []).slice(0), delegate, attr, matchedAttr, hasMatch, valuesEqual; event.attr = prop; event.lastAttr = props[props.length - 1]; // for each delegate for (var i = 0; delegate = delegates[i++];) { // if there is a batchNum, this means that this // event is part of a series of events caused by a single // attrs call. We don't want to issue the same event // multiple times // setting the batchNum happens later if ((event.batchNum && delegate.batchNum === event.batchNum) || delegate.undelegated) { continue; } // reset match and values tests hasMatch = undefined; valuesEqual = true; // yeah, all this under here has to be redone v // for each attr in a delegate for (var a = 0; a < delegate.attrs.length; a++) { attr = delegate.attrs[a]; // check if it is a match if (matchedAttr = matches(attr.parts, props)) { hasMatch = matchedAttr; } // if it has a value, make sure it's the right value // if it's set, we should probably check that it has a // value no matter what if (attr.value && valuesEqual) { valuesEqual = attr.value === "" + this.attr(attr.attr) } else if (valuesEqual && delegate.attrs.length > 1) { // if there are multiple attributes, each has to at // least have some value valuesEqual = this.attr(attr.attr) !== undefined } } // if there is a match and valuesEqual ... call back if (hasMatch && valuesEqual) { // how to get to the changed property from the delegate var from = prop.replace(hasMatch + ".", ""); // if this event is part of a batch, set it on the delegate // to only send one event if (event.batchNum) { delegate.batchNum = event.batchNum } // if we listen to change, fire those with the same attrs // TODO: the attrs should probably be using from if (delegate.event === 'change') { arguments[1] = from; event.curAttr = hasMatch; delegate.callback.apply(this.attr(hasMatch), can.makeArray(arguments)); } else if (delegate.event === how) { // if it's a match, callback with the location of the match delegate.callback.apply(this.attr(hasMatch), [event, newVal, oldVal, from]); } else if (delegate.event === 'set' && how == 'add') { // if we are listening to set, we should also listen to add delegate.callback.apply(this.attr(hasMatch), [event, newVal, oldVal, from]); } } } }; can.extend(can.Observe.prototype, { delegate: function (selector, event, handler) { selector = can.trim(selector); var delegates = this._observe_delegates || (this._observe_delegates = []), attrs = [], selectorRegex = /([^\s=,]+)(?:=("[^",]*"|'[^',]*'|[^\s"',]*))?(,?)\s*/g, matches; // parse each property in the selector while (matches = selectorRegex.exec(selector)) { // we need to do a little doctoring to make up for the quotes. 
if (matches[2] && $.inArray(matches[2].substr(0, 1), ['"', "'"]) >= 0) { matches[2] = matches[2].substr(1, -1); } attrs.push({ // the attribute name attr: matches[1], // the attribute name, pre-split for speed parts: matches[1].split('.'), // the value associated with this property (if there was one given) value: matches[2], // whether this selector combines with the one after it with AND or OR or: matches[3] === ',' }); } // delegates has pre-processed info about the event delegates.push({ // the attrs name for unbinding selector: selector, // an object of attribute names and values {type: 'recipe',id: undefined} // undefined means a value was not defined attrs: attrs, callback: handler, event: event }); if (delegates.length === 1) { this.bind("change", delegate) } return this; }, undelegate: function (selector, event, handler) { selector = can.trim(selector); var i = 0, delegates = this._observe_delegates || [], delegateOb; if (selector) { while (i < delegates.length) { delegateOb = delegates[i]; if (delegateOb.callback === handler || (!handler && delegateOb.selector === selector)) { delegateOb.undelegated = true; delegates.splice(i, 1) } else { i++; } } } else { // remove all delegates delegates = []; } if (!delegates.length) { //can.removeData(this, "_observe_delegates"); this.unbind("change", delegate) } return this; } }); // add helpers for testing .. can.Observe.prototype.delegate.matches = matches; return can.Observe; })(module["can/util/jquery/jquery.js"], module["can/observe/observe.js"]); // ## can/observe/setter/setter.js module['can/observe/setter/setter.js'] = (function (can) { can.classize = function (s, join) { // this can be moved out .. // used for getter setter var parts = s.split(can.undHash), i = 0; for (; i < parts.length; i++) { parts[i] = can.capitalize(parts[i]); } return parts.join(join || ''); } var classize = can.classize, proto = can.Observe.prototype, old = proto.__set; proto.__set = function (prop, value, current, success, error) { // check if there's a setter var cap = classize(prop), setName = "set" + cap, errorCallback = function (errors) { var stub = error && error.call(self, errors); // if 'setter' is on the page it will trigger // the error itself and we dont want to trigger // the event twice. :) if (stub !== false) { can.trigger(self, "error", [prop, errors], true); } return false; }, self = this; // if we have a setter if (this[setName] && // call the setter, if returned value is undefined, // this means the setter is async so we // do not call update property and return right away (value = this[setName](value, function (value) { old.call(self, prop, value, current, success, errorCallback) }, errorCallback)) === undefined) { return; } old.call(self, prop, value, current, success, errorCallback); return this; }; return can.Observe; })(module["can/util/jquery/jquery.js"], module["can/observe/attributes/attributes.js"]); // ## can/observe/validations/validations.js module['can/observe/validations/validations.js'] = (function (can) { //validations object is by property. You can have validations that //span properties, but this way we know which ones to run. 
// proc should return true if there's an error or the error message var validate = function (attrNames, options, proc) { // normalize argumetns if (!proc) { proc = options; options = {}; } options = options || {}; attrNames = can.makeArray(attrNames) // run testIf if it exists if (options.testIf && !options.testIf.call(this)) { return; } var self = this; can.each(attrNames, function (attrName) { // Add a test function for each attribute if (!self.validations[attrName]) { self.validations[attrName] = []; } self.validations[attrName].push(function (newVal) { // if options has a message return that, otherwise, return the error var res = proc.call(this, newVal, attrName); return res === undefined ? undefined : (options.message || res); }) }); }; var old = can.Observe.prototype.__set; can.Observe.prototype.__set = function (prop, value, current, success, error) { var self = this, validations = self.constructor.validations, errorCallback = function (errors) { var stub = error && error.call(self, errors); // if 'setter' is on the page it will trigger // the error itself and we dont want to trigger // the event twice. :) if (stub !== false) { can.trigger(self, "error", [prop, errors], true); } return false; }; old.call(self, prop, value, current, success, errorCallback); if (validations && validations[prop]) { var errors = self.errors(prop); errors && errorCallback(errors) } return this; } can.each([can.Observe, can.Model], function (clss) { // in some cases model might not be defined quite yet. if (clss === undefined) { return; } var oldSetup = clss.setup; can.extend(clss, { setup: function (superClass) { oldSetup.apply(this, arguments); if (!this.validations || superClass.validations === this.validations) { this.validations = {}; } }, validate: validate, validationMessages: { format: "is invalid", inclusion: "is not a valid option (perhaps out of range)", lengthShort: "is too short", lengthLong: "is too long", presence: "can't be empty", range: "is out of range" }, validateFormatOf: function (attrNames, regexp, options) { validate.call(this, attrNames, options, function (value) { if ((typeof value !== 'undefined' && value !== null && value !== '') && String(value).match(regexp) == null) { return this.constructor.validationMessages.format; } }); }, validateInclusionOf: function (attrNames, inArray, options) { validate.call(this, attrNames, options, function (value) { if (typeof value == 'undefined') { return; } if (can.grep(inArray, function (elm) { return (elm == value); }).length == 0) { return this.constructor.validationMessages.inclusion; } }); }, validateLengthOf: function (attrNames, min, max, options) { validate.call(this, attrNames, options, function (value) { if (((typeof value === 'undefined' || value === null) && min > 0) || (typeof value !== 'undefined' && value !== null && value.length < min)) { return this.constructor.validationMessages.lengthShort + " (min=" + min + ")"; } else if (typeof value != 'undefined' && value !== null && value.length > max) { return this.constructor.validationMessages.lengthLong + " (max=" + max + ")"; } }); }, validatePresenceOf: function (attrNames, options) { validate.call(this, attrNames, options, function (value) { if (typeof value == 'undefined' || value === "" || value === null) { return this.constructor.validationMessages.presence; } }); }, validateRangeOf: function (attrNames, low, hi, options) { validate.call(this, attrNames, options, function (value) { if (((typeof value == 'undefined' || value === null) && low > 0) || (typeof value !== 
'undefined' && value !== null && (value < low || value > hi))) { return this.constructor.validationMessages.range + " [" + low + "," + hi + "]"; } }); } }); }); can.extend(can.Observe.prototype, { errors: function (attrs, newVal) { // convert attrs to an array if (attrs) { attrs = can.isArray(attrs) ? attrs : [attrs]; } var errors = {}, self = this, attr, // helper function that adds error messages to errors object // attr - the name of the attribute // funcs - the validation functions addErrors = function (attr, funcs) { can.each(funcs, function (func) { var res = func.call(self, isTest ? (self.__convert ? self.__convert(attr, newVal) : newVal) : self[attr]); if (res) { if (!errors[attr]) { errors[attr] = []; } errors[attr].push(res); } }); }, validations = this.constructor.validations, isTest = attrs && attrs.length === 1 && arguments.length === 2; // go through each attribute or validation and // add any errors can.each(attrs || validations || {}, function (funcs, attr) { // if we are iterating through an array, use funcs // as the attr name if (typeof attr == 'number') { attr = funcs; funcs = validations[attr]; } // add errors to the addErrors(attr, funcs || []); }); // return errors as long as we have one return can.isEmptyObject(errors) ? null : isTest ? errors[attrs[0]] : errors; } }); return can.Observe; })(module["can/util/jquery/jquery.js"], module["can/observe/attributes/attributes.js"]); // ## can/util/string/deparam/deparam.js module['can/util/string/deparam/deparam.js'] = (function (can) { // ## deparam.js // `can.deparam` // _Takes a string of name value pairs and returns a Object literal that represents those params._ var digitTest = /^\d+$/, keyBreaker = /([^\[\]]+)|(\[\])/g, paramTest = /([^?#]*)(#.*)?$/, prep = function (str) { return decodeURIComponent(str.replace(/\+/g, " ")); }; can.extend(can, { deparam: function (params) { var data = {}, pairs, lastPart; if (params && paramTest.test(params)) { pairs = params.split('&'), can.each(pairs, function (pair) { var parts = pair.split('='), key = prep(parts.shift()), value = prep(parts.join("=")), current = data; parts = key.match(keyBreaker); for (var j = 0, l = parts.length - 1; j < l; j++) { if (!current[parts[j]]) { // If what we are pointing to looks like an `array` current[parts[j]] = digitTest.test(parts[j + 1]) || parts[j + 1] == "[]" ? [] : {}; } current = current[parts[j]]; } lastPart = parts.pop(); if (lastPart == "[]") { current.push(value); } else { current[lastPart] = value; } }); } return data; } }); return can; })(module["can/util/jquery/jquery.js"], module["can/util/string/string.js"]); // ## can/route/route.js module['can/route/route.js'] = (function (can) { // ## route.js // `can.route` // _Helps manage browser history (and client state) by synchronizing the // `window.location.hash` with a `can.Observe`._ // Helper methods used for matching routes. var // `RegExp` used to match route variables of the type ':name'. // Any word character or a period is matched. matcher = /\:([\w\.]+)/g, // Regular expression for identifying &amp;key=value lists. paramsMatcher = /^(?:&[^=]+=[^&]*)+/, // Converts a JS Object into a list of parameters that can be // inserted into an html element tag. makeProps = function (props) { var tags = []; can.each(props, function (val, name) { tags.push((name === 'className' ? 'class' : name) + '="' + (name === "href" ? val : can.esc(val)) + '"'); }); return tags.join(" "); }, // Checks if a route matches the data provided. 
If any route variable // is not present in the data, the route does not match. If all route // variables are present in the data, the number of matches is returned // to allow discerning between general and more specific routes. matchesData = function (route, data) { var count = 0, i = 0, defaults = {}; // look at default values, if they match ... for (var name in route.defaults) { if (route.defaults[name] === data[name]) { // mark as matched defaults[name] = 1; count++; } } for (; i < route.names.length; i++) { if (!data.hasOwnProperty(route.names[i])) { return -1; } if (!defaults[route.names[i]]) { count++; } } return count; }, onready = !0, location = window.location, wrapQuote = function (str) { return (str + '').replace(/([.?*+\^$\[\]\\(){}|\-])/g, "\\$1"); }, each = can.each, extend = can.extend; can.route = function (url, defaults) { defaults = defaults || {}; // Extract the variable names and replace with `RegExp` that will match // an atual URL with values. var names = [], test = url.replace(matcher, function (whole, name, i) { names.push(name); var next = "\\" + (url.substr(i + whole.length, 1) || can.route._querySeparator); // a name without a default value HAS to have a value // a name that has a default value can be empty // The `\\` is for string-escaping giving single `\` for `RegExp` escaping. return "([^" + next + "]" + (defaults[name] ? "*" : "+") + ")"; }); // Add route in a form that can be easily figured out. can.route.routes[url] = { // A regular expression that will match the route when variable values // are present; i.e. for `:page/:type` the `RegExp` is `/([\w\.]*)/([\w\.]*)/` which // will match for any value of `:page` and `:type` (word chars or period). test: new RegExp("^" + test + "($|" + wrapQuote(can.route._querySeparator) + ")"), // The original URL, same as the index for this entry in routes. route: url, // An `array` of all the variable names in this route. names: names, // Default values provided for the variables. defaults: defaults, // The number of parts in the URL separated by `/`. length: url.split('/').length }; return can.route; }; extend(can.route, { _querySeparator: '&', _paramsMatcher: paramsMatcher, param: function (data, _setRoute) { // Check if the provided data keys match the names in any routes; // Get the one with the most matches. var route, // Need to have at least 1 match. matches = 0, matchCount, routeName = data.route, propCount = 0; delete data.route; each(data, function () { propCount++; }); // Otherwise find route. each(can.route.routes, function (temp, name) { // best route is the first with all defaults matching matchCount = matchesData(temp, data); if (matchCount > matches) { route = temp; matches = matchCount; } if (matchCount >= propCount) { return false; } }); // If we have a route name in our `can.route` data, and it's // just as good as what currently matches, use that if (can.route.routes[routeName] && matchesData(can.route.routes[routeName], data) === matches) { route = can.route.routes[routeName]; } // If this is match... if (route) { var cpy = extend({}, data), // Create the url by replacing the var names with the provided data. // If the default value is found an empty string is inserted. res = route.route.replace(matcher, function (whole, name) { delete cpy[name]; return data[name] === route.defaults[name] ? 
"" : encodeURIComponent(data[name]); }), after; // Remove matching default values each(route.defaults, function (val, name) { if (cpy[name] === val) { delete cpy[name]; } }); // The remaining elements of data are added as // `&amp;` separated parameters to the url. after = can.param(cpy); // if we are paraming for setting the hash // we also want to make sure the route value is updated if (_setRoute) { can.route.attr('route', route.route); } return res + (after ? can.route._querySeparator + after : ""); } // If no route was found, there is no hash URL, only paramters. return can.isEmptyObject(data) ? "" : can.route._querySeparator + can.param(data); }, deparam: function (url) { // See if the url matches any routes by testing it against the `route.test` `RegExp`. // By comparing the URL length the most specialized route that matches is used. var route = { length: -1 }; each(can.route.routes, function (temp, name) { if (temp.test.test(url) && temp.length > route.length) { route = temp; } }); // If a route was matched. if (route.length > -1) { var // Since `RegExp` backreferences are used in `route.test` (parens) // the parts will contain the full matched string and each variable (back-referenced) value. parts = url.match(route.test), // Start will contain the full matched string; parts contain the variable values. start = parts.shift(), // The remainder will be the `&amp;key=value` list at the end of the URL. remainder = url.substr(start.length - (parts[parts.length - 1] === can.route._querySeparator ? 1 : 0)), // If there is a remainder and it contains a `&amp;key=value` list deparam it. obj = (remainder && can.route._paramsMatcher.test(remainder)) ? can.deparam(remainder.slice(1)) : {}; // Add the default values for this route. obj = extend(true, {}, route.defaults, obj); // Overwrite each of the default values in `obj` with those in // parts if that part is not empty. each(parts, function (part, i) { if (part && part !== can.route._querySeparator) { obj[route.names[i]] = decodeURIComponent(part); } }); obj.route = route.route; return obj; } // If no route was matched, it is parsed as a `&amp;key=value` list. if (url.charAt(0) !== can.route._querySeparator) { url = can.route._querySeparator + url; } return can.route._paramsMatcher.test(url) ? can.deparam(url.slice(1)) : {}; }, data: new can.Observe({}), routes: {}, ready: function (val) { if (val === false) { onready = val; } if (val === true || onready === true) { can.route._setup(); setState(); } return can.route; }, url: function (options, merge) { if (merge) { options = extend({}, curParams, options) } return "#!" + can.route.param(options); }, link: function (name, options, props, merge) { return "<a " + makeProps( extend({ href: can.route.url(options, merge) }, props)) + ">" + name + "</a>"; }, current: function (options) { return location.hash == "#!" + can.route.param(options) }, _setup: function () { // If the hash changes, update the `can.route.data`. can.bind.call(window, 'hashchange', setState); }, _getHash: function () { return location.href.split(/#!?/)[1] || ""; }, _setHash: function (serialized) { var path = (can.route.param(serialized, true)); location.hash = "#!" + path; return path; } }); // The functions in the following list applied to `can.route` (e.g. `can.route.attr('...')`) will // instead act on the `can.route.data` observe. 
each(['bind', 'unbind', 'delegate', 'undelegate', 'attr', 'removeAttr'], function (name) { can.route[name] = function () { return can.route.data[name].apply(can.route.data, arguments) } }) var // A ~~throttled~~ debounced function called multiple times will only fire once the // timer runs down. Each call resets the timer. timer, // Intermediate storage for `can.route.data`. curParams, // Deparameterizes the portion of the hash of interest and assign the // values to the `can.route.data` removing existing values no longer in the hash. // setState is called typically by hashchange which fires asynchronously // So it's possible that someone started changing the data before the // hashchange event fired. For this reason, it will not set the route data // if the data is changing or the hash already matches the hash that was set. setState = can.route.setState = function () { var hash = can.route._getHash(); curParams = can.route.deparam(hash); // if the hash data is currently changing, or // the hash is what we set it to anyway, do NOT change the hash if (!changingData || hash !== lastHash) { can.route.attr(curParams, true); } }, // The last hash caused by a data change lastHash, // Are data changes pending that haven't yet updated the hash changingData; // If the `can.route.data` changes, update the hash. // Using `.serialize()` retrieves the raw data contained in the `observable`. // This function is ~~throttled~~ debounced so it only updates once even if multiple values changed. // This might be able to use batchNum and avoid this. can.route.bind("change", function (ev, attr) { // indicate that data is changing changingData = 1; clearTimeout(timer); timer = setTimeout(function () { // indicate that the hash is set to look like the data changingData = 0; var serialized = can.route.data.serialize(); lastHash = can.route._setHash(serialized); }, 1); }); // `onready` event... can.bind.call(document, "ready", can.route.ready); // Libraries other than jQuery don't execute the document `ready` listener // if we are already DOM ready if ((document.readyState === 'complete' || document.readyState === "interactive") && onready) { can.route.ready(); } // extend route to have a similar property // that is often checked in mustache to determine // an object's observability can.route.constructor.canMakeObserve = can.Observe.canMakeObserve; return can.route; })(module["can/util/jquery/jquery.js"], module["can/observe/observe.js"], module["can/util/string/deparam/deparam.js"]); // ## can/util/object/object.js module['can/util/object/object.js'] = (function (can) { var isArray = can.isArray, // essentially returns an object that has all the must have comparisons ... // must haves, do not return true when provided undefined cleanSet = function (obj, compares) { var copy = can.extend({}, obj); for (var prop in copy) { var compare = compares[prop] === undefined ? 
compares["*"] : compares[prop]; if (same(copy[prop], undefined, compare)) { delete copy[prop] } } return copy; }, propCount = function (obj) { var count = 0; for (var prop in obj) count++; return count; }; can.Object = {}; var same = can.Object.same = function (a, b, compares, aParent, bParent, deep) { var aType = typeof a, aArray = isArray(a), comparesType = typeof compares, compare; if (comparesType == 'string' || compares === null) { compares = compareMethods[compares]; comparesType = 'function' } if (comparesType == 'function') { return compares(a, b, aParent, bParent) } compares = compares || {}; if (a instanceof Date) { return a === b; } if (deep === -1) { return aType === 'object' || a === b; } if (aType !== typeof b || aArray !== isArray(b)) { return false; } if (a === b) { return true; } if (aArray) { if (a.length !== b.length) { return false; } for (var i = 0; i < a.length; i++) { compare = compares[i] === undefined ? compares["*"] : compares[i] if (!same(a[i], b[i], a, b, compare)) { return false; } }; return true; } else if (aType === "object" || aType === 'function') { var bCopy = can.extend({}, b); for (var prop in a) { compare = compares[prop] === undefined ? compares["*"] : compares[prop]; if (!same(a[prop], b[prop], compare, a, b, deep === false ? -1 : undefined)) { return false; } delete bCopy[prop]; } // go through bCopy props ... if there is no compare .. return false for (prop in bCopy) { if (compares[prop] === undefined || !same(undefined, b[prop], compares[prop], a, b, deep === false ? -1 : undefined)) { return false; } } return true; } return false; }; can.Object.subsets = function (checkSet, sets, compares) { var len = sets.length, subsets = [], checkPropCount = propCount(checkSet), setLength; for (var i = 0; i < len; i++) { //check this subset var set = sets[i]; if (can.Object.subset(checkSet, set, compares)) { subsets.push(set) } } return subsets; }; can.Object.subset = function (subset, set, compares) { // go through set {type: 'folder'} and make sure every property // is in subset {type: 'folder', parentId :5} // then make sure that set has fewer properties // make sure we are only checking 'important' properties // in subset (ones that have to have a value) var setPropCount = 0, compares = compares || {}; for (var prop in set) { if (!same(subset[prop], set[prop], compares[prop], subset, set)) { return false; } } return true; } var compareMethods = { "null": function () { return true; }, i: function (a, b) { return ("" + a).toLowerCase() == ("" + b).toLowerCase() } } return can; })(module["can/util/jquery/jquery.js"]); // ## can/observe/backup/backup.js module['can/observe/backup/backup.js'] = (function (can) { var flatProps = function (a) { var obj = {}; for (var prop in a) { if (typeof a[prop] !== 'object' || a[prop] === null || a[prop] instanceof Date) { obj[prop] = a[prop] } } return obj; }; can.extend(can.Observe.prototype, { backup: function () { this._backupStore = this._attrs(); return this; }, isDirty: function (checkAssociations) { return this._backupStore && !can.Object.same(this._attrs(), this._backupStore, undefined, undefined, undefined, !! checkAssociations); }, restore: function (restoreAssociations) { var props = restoreAssociations ? 
this._backupStore : flatProps(this._backupStore) if (this.isDirty(restoreAssociations)) { this._attrs(props); } return this; } }) return can.Observe; })(module["can/util/jquery/jquery.js"], module["can/observe/observe.js"], module["can/util/object/object.js"]); window.define = module._define; window.module = module._orig;
PypiClean
/Braindecode-0.7.tar.gz/Braindecode-0.7/braindecode/models/eegitnet.py
import torch from torch import nn from .modules import Ensure4d, Expression def _permute(x): """Permute data. Input dimensions: (batch, channels, time, 1) Output dimensions: (batch, 1, channels, time) """ return x.permute([0, 3, 1, 2]) class _DepthwiseConv2d(torch.nn.Conv2d): def __init__( self, in_channels, depth_multiplier=2, kernel_size=3, stride=1, padding=0, dilation=1, bias=True, padding_mode="zeros", ): out_channels = in_channels * depth_multiplier super().__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=in_channels, bias=bias, padding_mode=padding_mode, ) class _InceptionBlock(nn.Module): def __init__(self, branches): super().__init__() self.branches = nn.ModuleList(branches) def forward(self, x): return torch.cat([branch(x) for branch in self.branches], 1) class _TCBlock(nn.Module): def __init__(self, in_ch, kernel_length, dialation, padding, drop_prob=0.4): super().__init__() self.pad = padding self.tc1 = nn.Sequential( _DepthwiseConv2d( in_ch, kernel_size=(1, kernel_length), depth_multiplier=1, dilation=(1, dialation), bias=False, padding="valid", ), nn.BatchNorm2d(in_ch), nn.ELU(), nn.Dropout(drop_prob), ) self.tc2 = nn.Sequential( _DepthwiseConv2d( in_ch, kernel_size=(1, kernel_length), depth_multiplier=1, dilation=(1, dialation), bias=False, padding="valid", ), nn.BatchNorm2d(in_ch), nn.ELU(), nn.Dropout(drop_prob), ) def forward(self, x): residual = x paddings = (self.pad, 0, 0, 0, 0, 0, 0, 0) x = nn.functional.pad(x, paddings) x = self.tc1(x) x = nn.functional.pad(x, paddings) x = self.tc2(x) + residual return x class EEGITNet(nn.Sequential): """EEG-ITNet: An Explainable Inception Temporal Convolutional Network for motor imagery classification, from Salami et al., 2022. See [Salami2022]_ for details. Code adapted from https://github.com/abbassalami/eeg-itnet Parameters ---------- n_classes: int number of outputs of the decoding task (for example number of classes in classification) in_channels: int number of input EEG channels input_window_samples : int Number of time samples. drop_prob: float Dropout probability. References ---------- .. [Salami2022] A. Salami, J. Andreu-Perez and H. Gillmeister, "EEG-ITNet: An Explainable Inception Temporal Convolutional Network for motor imagery classification," in IEEE Access, doi: 10.1109/ACCESS.2022.3161489. Notes ----- This implementation is not guaranteed to be correct, has not been checked by original authors, only reimplemented from the paper based on author implementation. 
""" def __init__(self, n_classes, in_channels, input_window_samples, drop_prob=0.4): super().__init__() # ======== Handling EEG input ======================== self.add_module( "input_preprocess", nn.Sequential(Ensure4d(), Expression(_permute)) ) # ======== Inception branches ======================== block11 = self._get_inception_branch( in_channels=in_channels, out_channels=2, kernel_length=16 ) block12 = self._get_inception_branch( in_channels=in_channels, out_channels=4, kernel_length=32 ) block13 = self._get_inception_branch( in_channels=in_channels, out_channels=8, kernel_length=64 ) self.add_module("inception_block", _InceptionBlock((block11, block12, block13))) self.pool1 = self.add_module("pooling", nn.Sequential( nn.AvgPool2d(kernel_size=(1, 4)), nn.Dropout(drop_prob))) # =========== TC blocks ===================== self.add_module( "TC_block1", _TCBlock(in_ch=14, kernel_length=4, dialation=1, padding=3, drop_prob=drop_prob) ) # ================================ self.add_module( "TC_block2", _TCBlock(in_ch=14, kernel_length=4, dialation=2, padding=6, drop_prob=drop_prob) ) # ================================ self.add_module( "TC_block3", _TCBlock(in_ch=14, kernel_length=4, dialation=4, padding=12, drop_prob=drop_prob) ) # ================================ self.add_module( "TC_block4", _TCBlock(in_ch=14, kernel_length=4, dialation=8, padding=24, drop_prob=drop_prob) ) # ============= Dimensionality reduction =================== self.add_module("dim_reduction", nn.Sequential( nn.Conv2d(14, 28, kernel_size=(1, 1)), nn.BatchNorm2d(28), nn.ELU(), nn.AvgPool2d((1, 4)), nn.Dropout(drop_prob))) # ============== Classifier ================== self.add_module("classifier", nn.Sequential( torch.nn.Flatten(), nn.Linear(int(int(input_window_samples / 4) / 4) * 28, n_classes), nn.Softmax(dim=1))) @staticmethod def _get_inception_branch(in_channels, out_channels, kernel_length, depth_multiplier=1): return nn.Sequential( nn.Conv2d( 1, out_channels, kernel_size=(1, kernel_length), padding="same", bias=False ), nn.BatchNorm2d(out_channels), _DepthwiseConv2d( out_channels, kernel_size=(in_channels, 1), depth_multiplier=depth_multiplier, bias=False, padding="valid", ), nn.BatchNorm2d(out_channels), nn.ELU())
PypiClean
/Cohen-0.7.4.tar.gz/Cohen-0.7.4/coherence/backends/ted_storage.py
# Licensed under the MIT license # http://opensource.org/licenses/mit-license.php # Copyright 2008, Benjamin Kampmann <[email protected]> """ Another simple rss based Media Server, this time for TED.com content """ # I can reuse stuff. cool. But that also means we might want to refactor it into # a base class to reuse from coherence.backends.lolcats_storage import LolcatsStore from coherence.backends.appletrailers_storage import Container from coherence.backend import BackendItem from coherence.upnp.core import DIDLLite class TedTalk(BackendItem): def __init__(self, parent_id, id, title=None, url=None, duration=None, size=None): BackendItem.__init__(self) self.parentid = parent_id self.update_id = 0 self.id = id self.location = url self.name = title self.item = DIDLLite.VideoItem(id, parent_id, self.name) res = DIDLLite.Resource(self.location, 'http-get:*:video/mp4:*') # FIXME should be video/x-m4a res.size = size res.duration = duration self.item.res.append(res) class TEDStore(LolcatsStore): implements = ['MediaServer'] rss_url = "http://feeds.feedburner.com/tedtalks_video?format=xml" ROOT_ID = 0 def __init__(self, server, *args, **kwargs): LolcatsStore.__init__(self, server, **kwargs) self.name = kwargs.get('name', 'TEDtalks') self.refresh = int(kwargs.get('refresh', 1)) * (60 * 60) self.next_id = 1001 self.last_updated = None self.container = Container(None, self.ROOT_ID, self.name) self.videos = {} dfr = self.update_data() dfr.addCallback(self.init_completed) def get_by_id(self, id): if int(id) == self.ROOT_ID: return self.container return self.videos.get(int(id), None) def upnp_init(self): if self.server: self.server.connection_manager_server.set_variable( \ 0, 'SourceProtocolInfo', ['http-get:*:video/mp4:*']) def parse_data(self, xml_data): root = xml_data.getroot() pub_date = root.find('./channel/lastBuildDate').text if pub_date == self.last_updated: return self.last_updated = pub_date self.container.children = [] self.videos = {} # FIXME: move these to generic constants somewhere mrss = './{http://search.yahoo.com/mrss/}' itunes = './{http://www.itunes.com/dtds/podcast-1.0.dtd}' url_item = mrss + 'content' duration = itunes + 'duration' summary = itunes + 'summary' for item in root.findall('./channel/item'): data = {} data['parent_id'] = self.ROOT_ID data['id'] = self.next_id data['title'] = item.find('./title').text.replace('TEDTalks : ', '') # data ['summary'] = item.find(summary).text # data ['duration'] = item.find(duration).text try: media_entry = item.find(url_item) data['url'] = media_entry.get('url', None) data['size'] = media_entry.get('size', None) except IndexError: continue video = TedTalk(**data) self.container.children.append(video) self.videos[self.next_id] = video self.next_id += 1 self.container.update_id += 1 self.update_id += 1 if self.server and hasattr(self.server, 'content_directory_server'): # the content_directory_server may not yet be initialised self.server.content_directory_server.set_variable(0, 'SystemUpdateID', self.update_id) value = (self.ROOT_ID, self.container.update_id) self.server.content_directory_server.set_variable(0, 'ContainerUpdateIDs', value)
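# ---------------------------------------------------------------------------
# Hedged illustration (added for clarity; not part of the original Cohen
# backend). parse_data() above looks elements up with the XML namespace baked
# directly into the tag name (e.g. './{http://search.yahoo.com/mrss/}content')
# and reads the 'url' and 'size' attributes from the media:content element.
# The snippet below demonstrates that ElementTree lookup pattern on a tiny
# hand-written feed; the feed contents and URLs are made up for illustration.
if __name__ == "__main__":
    from xml.etree import ElementTree as ET

    SAMPLE_FEED = (
        '<rss xmlns:media="http://search.yahoo.com/mrss/">'
        '  <channel>'
        '    <item>'
        '      <title>TEDTalks : Example talk</title>'
        '      <media:content url="http://example.org/talk.mp4" size="12345"/>'
        '    </item>'
        '  </channel>'
        '</rss>'
    )

    root = ET.fromstring(SAMPLE_FEED)
    mrss = './{http://search.yahoo.com/mrss/}'
    for item in root.findall('./channel/item'):
        title = item.find('./title').text.replace('TEDTalks : ', '')
        media = item.find(mrss + 'content')
        print(title, media.get('url'), media.get('size'))
    # prints: Example talk http://example.org/talk.mp4 12345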
PypiClean
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_categories/get.py
from dataclasses import dataclass import typing_extensions import urllib3 from urllib3._collections import HTTPHeaderDict from firefly_iii_client import api_client, exceptions from datetime import date, datetime # noqa: F401 import decimal # noqa: F401 import functools # noqa: F401 import io # noqa: F401 import re # noqa: F401 import typing # noqa: F401 import typing_extensions # noqa: F401 import uuid # noqa: F401 import frozendict # noqa: F401 from firefly_iii_client import schemas # noqa: F401 from firefly_iii_client.model.category_array import CategoryArray from firefly_iii_client.model.unauthenticated import Unauthenticated from firefly_iii_client.model.bad_request import BadRequest from firefly_iii_client.model.internal_exception import InternalException from firefly_iii_client.model.not_found import NotFound from . import path # Query params PageSchema = schemas.IntSchema RequestRequiredQueryParams = typing_extensions.TypedDict( 'RequestRequiredQueryParams', { } ) RequestOptionalQueryParams = typing_extensions.TypedDict( 'RequestOptionalQueryParams', { 'page': typing.Union[PageSchema, decimal.Decimal, int, ], }, total=False ) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): pass request_query_page = api_client.QueryParameter( name="page", style=api_client.ParameterStyle.FORM, schema=PageSchema, explode=True, ) # Header params XTraceIdSchema = schemas.UUIDSchema RequestRequiredHeaderParams = typing_extensions.TypedDict( 'RequestRequiredHeaderParams', { } ) RequestOptionalHeaderParams = typing_extensions.TypedDict( 'RequestOptionalHeaderParams', { 'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ], }, total=False ) class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams): pass request_header_x_trace_id = api_client.HeaderParameter( name="X-Trace-Id", style=api_client.ParameterStyle.SIMPLE, schema=XTraceIdSchema, ) _auth = [ 'firefly_iii_auth', ] SchemaFor200ResponseBodyApplicationVndApijson = CategoryArray @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor200ResponseBodyApplicationVndApijson, ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ 'application/vnd.api+json': api_client.MediaType( schema=SchemaFor200ResponseBodyApplicationVndApijson), }, ) SchemaFor400ResponseBodyApplicationJson = BadRequest @dataclass class ApiResponseFor400(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor400ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_400 = api_client.OpenApiResponse( response_cls=ApiResponseFor400, content={ 'application/json': api_client.MediaType( schema=SchemaFor400ResponseBodyApplicationJson), }, ) SchemaFor401ResponseBodyApplicationJson = Unauthenticated @dataclass class ApiResponseFor401(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor401ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_401 = api_client.OpenApiResponse( response_cls=ApiResponseFor401, content={ 'application/json': api_client.MediaType( schema=SchemaFor401ResponseBodyApplicationJson), }, ) SchemaFor404ResponseBodyApplicationJson = NotFound @dataclass class ApiResponseFor404(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor404ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_404 = 
api_client.OpenApiResponse( response_cls=ApiResponseFor404, content={ 'application/json': api_client.MediaType( schema=SchemaFor404ResponseBodyApplicationJson), }, ) SchemaFor500ResponseBodyApplicationJson = InternalException @dataclass class ApiResponseFor500(api_client.ApiResponse): response: urllib3.HTTPResponse body: typing.Union[ SchemaFor500ResponseBodyApplicationJson, ] headers: schemas.Unset = schemas.unset _response_for_500 = api_client.OpenApiResponse( response_cls=ApiResponseFor500, content={ 'application/json': api_client.MediaType( schema=SchemaFor500ResponseBodyApplicationJson), }, ) _status_code_to_response = { '200': _response_for_200, '400': _response_for_400, '401': _response_for_401, '404': _response_for_404, '500': _response_for_500, } _all_accept_content_types = ( 'application/vnd.api+json', 'application/json', ) class BaseApi(api_client.Api): @typing.overload def _list_category_oapg( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def _list_category_oapg( self, skip_deserialization: typing_extensions.Literal[True], query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_category_oapg( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def _list_category_oapg( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ List all categories. 
:param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances """ self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params) used_path = path.value prefix_separator_iterator = None for parameter in ( request_query_page, ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue if prefix_separator_iterator is None: prefix_separator_iterator = parameter.get_prefix_separator_iterator() serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) for serialized_value in serialized_data.values(): used_path += serialized_value _headers = HTTPHeaderDict() for parameter in ( request_header_x_trace_id, ): parameter_data = header_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue serialized_data = parameter.serialize(parameter_data) _headers.extend(serialized_data) # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, timeout=timeout, ) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: raise exceptions.ApiException( status=response.status, reason=response.reason, api_response=api_response ) return api_response class ListCategory(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload def list_category( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def list_category( self, skip_deserialization: typing_extensions.Literal[True], query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def list_category( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... 
def list_category( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._list_category_oapg( query_params=query_params, header_params=header_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization ) class ApiForget(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload def get( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor200, ]: ... @typing.overload def get( self, skip_deserialization: typing_extensions.Literal[True], query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor200, api_client.ApiResponseWithoutDeserialization, ]: ... def get( self, query_params: RequestQueryParams = frozendict.frozendict(), header_params: RequestHeaderParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._list_category_oapg( query_params=query_params, header_params=header_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization )
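# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition). It assumes the generated package
# exposes `firefly_iii_client.Configuration` and `firefly_iii_client.ApiClient`
# and that the 'firefly_iii_auth' scheme is a bearer token supplied via
# `access_token`; those details are typical of clients produced by this
# generator but are not confirmed by the excerpt above. The host URL and token
# are placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import firefly_iii_client

    configuration = firefly_iii_client.Configuration(host="https://demo.firefly-iii.org")
    configuration.access_token = "YOUR_PERSONAL_ACCESS_TOKEN"  # placeholder

    with firefly_iii_client.ApiClient(configuration) as client:
        # ApiForget is defined above for GET on the /v1/categories path.
        api = ApiForget(client)
        response = api.get(query_params={'page': 1})
        # On HTTP 200 the body deserializes to a CategoryArray.
        print(response.body)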
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/lang/async.js.uncompressed.js
define("dojox/lang/async", ["dijit","dojo","dojox"], function(dijit,dojo,dojox){ dojo.provide("dojox.lang.async"); (function(){ var d = dojo, Deferred = d.Deferred, each = d.forEach, some = d.some, async = dojox.lang.async, aps = Array.prototype.slice, opts = Object.prototype.toString; async.seq = function(x){ // summary: // Executes functions sequentially. Waits if any of them returns Deferred. var fs = opts.call(x) == "[object Array]" ? x : arguments; return function(init){ var x = new Deferred(); each(fs, function(f){ x.addCallback(f); }); x.callback(init); return x; }; }; async.par = function(x){ // summary: // Executes functions in parallel. Waits for all of them to finish. var fs = opts.call(x) == "[object Array]" ? x : arguments; return function(init){ var results = new Array(fs.length), cancel = function(){ each(results, function(v){ if(v instanceof Deferred && v.fired < 0){ v.cancel(); } }); }, x = new Deferred(cancel), ready = fs.length; each(fs, function(f, i){ var x; try { x = f(init); }catch(e){ x = e; } results[i] = x; }); var failed = some(results, function(v){ if(v instanceof Error){ cancel(); x.errback(v); return true; } return false; }); if(!failed){ each(results, function(v, i){ if(v instanceof Deferred){ v.addCallbacks( function(v){ results[i] = v; if(!--ready){ x.callback(results); } }, function(v){ cancel(); x.errback(v); } ); }else{ --ready; } }); } if(!ready){ x.callback(results); } return x; }; }; async.any = function(x){ // summary: // Executes functions in parallel. As soon as one of them finishes // cancels the rest. var fs = opts.call(x) == "[object Array]" ? x : arguments; return function(init){ var results = new Array(fs.length), noResult = true; cancel = function(index){ each(results, function(v, i){ if(i != index && v instanceof Deferred && v.fired < 0){ v.cancel(); } }); }, x = new Deferred(cancel); each(fs, function(f, i){ var x; try { x = f(init); }catch(e){ x = e; } results[i] = x; }); var done = some(results, function(v, i){ if(!(v instanceof Deferred)){ cancel(i); x.callback(v); return true; } return false; }); if(!done){ each(results, function(v, i){ v.addBoth( function(v){ if(noResult){ noResult = false; cancel(i); x.callback(v); } } ); }); } return x; }; }; async.select = function(cond, x){ // summary: // Executes a condition, waits for it if necessary, and executes // Nth function from list. var fs = opts.call(x) == "[object Array]" ? x : aps.call(arguments, 1); return function(init){ return new Deferred().addCallback(cond).addCallback(function(v){ if(typeof v == "number" && v >= 0 && v < fs.length){ return fs[v](init); }else{ return new Error("async.select: out of range"); } }).callback(init); }; }; async.ifThen = function(cond, ifTrue, ifFalse){ // summary: // Executes a condition, waits for it if necessary, and executes // one of two functions. return function(init){ return new Deferred().addCallback(cond).addCallback(function(v){ return (v ? ifTrue : ifFalse)(init); }).callback(init); }; }; async.loop = function(cond, body){ // summary: // Executes a condition, waits for it if necessary, and executes // the body, if truthy value was returned. // Then it repeats the cycle until the condition function returns // a falsy value. return function(init){ var x, y = new Deferred(function(){ x.cancel(); }); function ifErr(v){ y.errback(v); } function loop(v){ if(v){ x.addCallback(body).addCallback(setUp); }else{ y.callback(v); } return v; } function setUp(init){ x = new Deferred(). addCallback(cond). addCallback(loop). 
addErrback(ifErr); x.callback(init); } setUp(init); return y; }; }; })(); /* Design decisions: seq() - behaves like the normal Deferred callback chain. par() - if error, all pending Deferreds are cancelled and the error is signaled, otherwise return an array of all results. any() - just like par() but only one result is returned. select() - any error is returned, otherwise the selected result is returned. loop() - any error is returned, otherwise the last result is returned. */ });
PypiClean
/ClueMapper-0.7.tar.gz/ClueMapper-0.7/src/clue/app/tracplugins/user.py
import traceback from StringIO import StringIO import time from trac import core from trac import config from trac.db import api as dbapi from tracusermanager import api as umapi from trac.util.translation import _ class ClueMapperDatabaseManager(dbapi.DatabaseManager): connection_uri = config.Option('cluemapper', 'database', 'sqlite:etc/cluemapper/cluemapper.db', """Database connection [wiki:TracEnvironment#DatabaseConnectionStrings string] for this project""") timeout = config.IntOption('cluemapper', 'timeout', '20', """Timeout value for database connection, in seconds. Use '0' to specify ''no timeout''. ''(Since 0.11)''""") class ClueMapperUserStore(core.Component): core.implements(umapi.IUserStore) def __init__(self): self.dbm = ClueMapperDatabaseManager(self.compmgr) def get_supported_user_operations(self, username): return [] def execute_user_operation(self, operation, user, operation_arguments): return True def create_user(self, username): db = self.dbm.get_connection() cursor = db.cursor() try: cursor.execute("DELETE FROM user_info WHERE " "username=%s AND name='created'", [username]) cursor.execute('INSERT INTO user_info (username, name, value) ' 'VALUES (%s,%s,%s)', [username, 'created', int(time.time())]) db.commit() except Exception, e: self.log.debug("User already exists, no need to re-create it." % (username)) def search_users(self, username_pattern=None): db = self.dbm.get_connection() cursor = db.cursor() search_result = [] try: if username_pattern is None: cursor.execute("SELECT username FROM user_info " "WHERE name='created'") else: cursor.execute("SELECT username FROM user_info " "WHERE name='created' AND username LIKE %s", username_pattern) for username, in cursor: search_result.append(username) except Exception, e: out = StringIO() traceback.print_exc(file=out) self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e), out.getvalue())) raise core.TracError(("Unable to search users [%s].") % (username_pattern)) return search_result def delete_user(self, username): db = self.dbm.get_connection() cursor = db.cursor() try: cursor.execute('DELETE FROM ' 'user_info ' 'WHERE username=%s', [username]) db.commit() return True except Exception, e: out = StringIO() traceback.print_exc(file=out) self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e), out.getvalue())) raise core.TracError(_("Unable to delete user [%s].")%(username)) return False class ClueMapperAttributeProvider(core.Component): core.implements(umapi.IAttributeProvider) def __init__(self): self.dbm = ClueMapperDatabaseManager(self.compmgr) def get_user_attribute(self, username, attribute): db = self.dbm.get_connection() cursor = db.cursor() try: cursor.execute("SELECT value FROM user_info " "WHERE username=%s AND name=%s", (username, attribute)) _result = list(cursor) if len(_result)>0: return _result[0][0] except Exception, e: out = StringIO() traceback.print_exc(file=out) self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e), out.getvalue())) raise core.TracError(_("Unable to load attribute %s for user " "[%s].") % (attribute, username)) return None def set_user_attribute(self, username, attribute, value): """Sets user's attribute value. 
@param username: str @param attribute: str @param value: str @return: bool """ db = self.dbm.get_connection() cursor = db.cursor() try: cursor.execute("DELETE FROM user_info " "WHERE username=%s AND name=%s", [username, attribute]) cursor.execute("INSERT INTO user_info " "(username, name, value) VALUES (%s, %s, %s)", [username, attribute, value]) db.commit() return True except Exception, e: out = StringIO() traceback.print_exc(file=out) self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e), out.getvalue())) raise core.TracError("Unable to set attribute %s for " "user [%s]."%(attribute, username)) return False def delete_user_attribute(self, username, attribute): """Removes user attribute. @param username: str @param attribute: str @return: bool """ db = self.dbm.get_connection() cursor = db.cursor() try: cursor.execute("DELETE FROM user_info " "WHERE username=%s and name=%s", [username, attribute]) db.commit() return True except Exception, e: out = StringIO() traceback.print_exc(file=out) self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e), out.getvalue())) raise core.TracError("Unable to delete attribute %s " "for user [%s]."%(attribute, username)) return False def get_usernames_with_attributes(self, attributes_dict=None): """ Returns all usernames matching attributes_dict. Example: self.get_usernames_with_attributes(dict(name='John%', email='%')) @param attributes_dict: dict @return: list """ db = self.dbm.get_connection() cursor = db.cursor() try: if attributes_dict is None: cursor.execute("SELECT username FROM user_info") else: """@note: [TO DO] Redo this query in order to avoid SQL Injection! The following line executes a query that should look like this: (for dict(name='John%', email='%@exemple.com')): SELECT sid, count(sid) cnt FROM session_attribute WHERE name='name' AND value like 'John%' OR name='email' AND value like '%@exemple.com' GROUP BY sid HAVING cnt=2 """ def _get_condition(k, v): is_not = k.startswith('NOT_') return "name='%s' AND value %sLIKE '%s'"%(is_not and k[4:] or k, is_not and 'NOT ' or '', v) cursor.execute("SELECT username, count(username) cnt FROM user_info WHERE %s GROUP BY username HAVING cnt=%s"% (" OR ".join([ _get_condition(k,v) for k,v in attributes_dict.items()]), len(attributes_dict.items()))) return [id for id, cnd in cursor] except Exception, e: out = StringIO() traceback.print_exc(file=out) self.log.error('%s: %s\n%s' % (self.__class__.__name__, str(e), out.getvalue())) return []
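# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition). Trac components are instantiated
# by passing a component manager, normally the Trac environment; the `env`
# path below is a placeholder, and the [cluemapper] database configured for
# that environment is assumed to already contain the user_info table.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from trac.env import Environment

    env = Environment('/path/to/trac/env')  # placeholder path

    store = ClueMapperUserStore(env)
    store.create_user('alice')
    print(store.search_users())  # lists all usernames with a 'created' row

    attributes = ClueMapperAttributeProvider(env)
    attributes.set_user_attribute('alice', 'email', '[email protected]')
    print(attributes.get_user_attribute('alice', 'email'))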
PypiClean
/MIAvisual-0.0.6-py3-none-any.whl/matplotlib/font_manager.py
import dataclasses from functools import lru_cache import json import logging from numbers import Number import os from pathlib import Path import re import subprocess import sys try: import threading from threading import Timer except ImportError: import dummy_threading as threading from dummy_threading import Timer import matplotlib as mpl from matplotlib import _api, afm, cbook, ft2font, rcParams from matplotlib.fontconfig_pattern import ( parse_fontconfig_pattern, generate_fontconfig_pattern) from matplotlib.rcsetup import _validators _log = logging.getLogger(__name__) font_scalings = { 'xx-small': 0.579, 'x-small': 0.694, 'small': 0.833, 'medium': 1.0, 'large': 1.200, 'x-large': 1.440, 'xx-large': 1.728, 'larger': 1.2, 'smaller': 0.833, None: 1.0, } stretch_dict = { 'ultra-condensed': 100, 'extra-condensed': 200, 'condensed': 300, 'semi-condensed': 400, 'normal': 500, 'semi-expanded': 600, 'semi-extended': 600, 'expanded': 700, 'extended': 700, 'extra-expanded': 800, 'extra-extended': 800, 'ultra-expanded': 900, 'ultra-extended': 900, } weight_dict = { 'ultralight': 100, 'light': 200, 'normal': 400, 'regular': 400, 'book': 400, 'medium': 500, 'roman': 500, 'semibold': 600, 'demibold': 600, 'demi': 600, 'bold': 700, 'heavy': 800, 'extra bold': 800, 'black': 900, } _weight_regexes = [ # From fontconfig's FcFreeTypeQueryFaceInternal; not the same as # weight_dict! ("thin", 100), ("extralight", 200), ("ultralight", 200), ("demilight", 350), ("semilight", 350), ("light", 300), # Needs to come *after* demi/semilight! ("book", 380), ("regular", 400), ("normal", 400), ("medium", 500), ("demibold", 600), ("demi", 600), ("semibold", 600), ("extrabold", 800), ("superbold", 800), ("ultrabold", 800), ("bold", 700), # Needs to come *after* extra/super/ultrabold! ("ultrablack", 1000), ("superblack", 1000), ("extrablack", 1000), (r"\bultra", 1000), ("black", 900), # Needs to come *after* ultra/super/extrablack! ("heavy", 900), ] font_family_aliases = { 'serif', 'sans-serif', 'sans serif', 'cursive', 'fantasy', 'monospace', 'sans', } # OS Font paths try: _HOME = Path.home() except Exception: # Exceptions thrown by home() are not specified... _HOME = Path(os.devnull) # Just an arbitrary path with no children. MSFolders = \ r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders' MSFontDirectories = [ r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts', r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts'] MSUserFontDirectories = [ str(_HOME / 'AppData/Local/Microsoft/Windows/Fonts'), str(_HOME / 'AppData/Roaming/Microsoft/Windows/Fonts'), ] X11FontDirectories = [ # an old standard installation point "/usr/X11R6/lib/X11/fonts/TTF/", "/usr/X11/lib/X11/fonts", # here is the new standard location for fonts "/usr/share/fonts/", # documented as a good place to install new fonts "/usr/local/share/fonts/", # common application, not really useful "/usr/lib/openoffice/share/fonts/truetype/", # user fonts str((Path(os.environ.get('XDG_DATA_HOME') or _HOME / ".local/share")) / "fonts"), str(_HOME / ".fonts"), ] OSXFontDirectories = [ "/Library/Fonts/", "/Network/Library/Fonts/", "/System/Library/Fonts/", # fonts installed via MacPorts "/opt/local/share/fonts", # user fonts str(_HOME / "Library/Fonts"), ] @lru_cache(64) def _cached_realpath(path): return os.path.realpath(path) def get_fontext_synonyms(fontext): """ Return a list of file extensions extensions that are synonyms for the given file extension *fileext*. 
""" return { 'afm': ['afm'], 'otf': ['otf', 'ttc', 'ttf'], 'ttc': ['otf', 'ttc', 'ttf'], 'ttf': ['otf', 'ttc', 'ttf'], }[fontext] def list_fonts(directory, extensions): """ Return a list of all fonts matching any of the extensions, found recursively under the directory. """ extensions = ["." + ext for ext in extensions] return [os.path.join(dirpath, filename) # os.walk ignores access errors, unlike Path.glob. for dirpath, _, filenames in os.walk(directory) for filename in filenames if Path(filename).suffix.lower() in extensions] def win32FontDirectory(): r""" Return the user-specified font directory for Win32. This is looked up from the registry key :: \\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts If the key is not found, ``%WINDIR%\Fonts`` will be returned. """ import winreg try: with winreg.OpenKey(winreg.HKEY_CURRENT_USER, MSFolders) as user: return winreg.QueryValueEx(user, 'Fonts')[0] except OSError: return os.path.join(os.environ['WINDIR'], 'Fonts') def _win32RegistryFonts(reg_domain, base_dir): r""" Search for fonts in the Windows registry. Parameters ---------- reg_domain : int The top level registry domain (e.g. HKEY_LOCAL_MACHINE). base_dir : str The path to the folder where the font files are usually located (e.g. C:\Windows\Fonts). If only the filename of the font is stored in the registry, the absolute path is built relative to this base directory. Returns ------- `set` `pathlib.Path` objects with the absolute path to the font files found. """ import winreg items = set() for reg_path in MSFontDirectories: try: with winreg.OpenKey(reg_domain, reg_path) as local: for j in range(winreg.QueryInfoKey(local)[1]): # value may contain the filename of the font or its # absolute path. key, value, tp = winreg.EnumValue(local, j) if not isinstance(value, str): continue try: # If value contains already an absolute path, then it # is not changed further. path = Path(base_dir, value).resolve() except RuntimeError: # Don't fail with invalid entries. continue items.add(path) except (OSError, MemoryError): continue return items # Also remove _win32RegistryFonts when this is removed. @_api.deprecated("3.5") def win32InstalledFonts(directory=None, fontext='ttf'): """ Search for fonts in the specified font directory, or use the system directories if none given. Additionally, it is searched for user fonts installed. A list of TrueType font filenames are returned by default, or AFM fonts if *fontext* == 'afm'. """ import winreg if directory is None: directory = win32FontDirectory() fontext = ['.' + ext for ext in get_fontext_synonyms(fontext)] items = set() # System fonts items.update(_win32RegistryFonts(winreg.HKEY_LOCAL_MACHINE, directory)) # User fonts for userdir in MSUserFontDirectories: items.update(_win32RegistryFonts(winreg.HKEY_CURRENT_USER, userdir)) # Keep only paths with matching file extension. return [str(path) for path in items if path.suffix.lower() in fontext] def _get_win32_installed_fonts(): """List the font paths known to the Windows registry.""" import winreg items = set() # Search and resolve fonts listed in the registry. for domain, base_dirs in [ (winreg.HKEY_LOCAL_MACHINE, [win32FontDirectory()]), # System. (winreg.HKEY_CURRENT_USER, MSUserFontDirectories), # User. ]: for base_dir in base_dirs: for reg_path in MSFontDirectories: try: with winreg.OpenKey(domain, reg_path) as local: for j in range(winreg.QueryInfoKey(local)[1]): # value may contain the filename of the font or its # absolute path. 
key, value, tp = winreg.EnumValue(local, j) if not isinstance(value, str): continue try: # If value contains already an absolute path, # then it is not changed further. path = Path(base_dir, value).resolve() except RuntimeError: # Don't fail with invalid entries. continue items.add(path) except (OSError, MemoryError): continue return items @lru_cache() def _get_fontconfig_fonts(): """Cache and list the font paths known to `fc-list`.""" try: if b'--format' not in subprocess.check_output(['fc-list', '--help']): _log.warning( # fontconfig 2.7 implemented --format. 'Matplotlib needs fontconfig>=2.7 to query system fonts.') return [] out = subprocess.check_output(['fc-list', '--format=%{file}\\n']) except (OSError, subprocess.CalledProcessError): return [] return [Path(os.fsdecode(fname)) for fname in out.split(b'\n')] @_api.deprecated("3.5") def get_fontconfig_fonts(fontext='ttf'): """List font filenames known to `fc-list` having the given extension.""" fontext = ['.' + ext for ext in get_fontext_synonyms(fontext)] return [str(path) for path in _get_fontconfig_fonts() if path.suffix.lower() in fontext] def findSystemFonts(fontpaths=None, fontext='ttf'): """ Search for fonts in the specified font paths. If no paths are given, will use a standard set of system paths, as well as the list of fonts tracked by fontconfig if fontconfig is installed and available. A list of TrueType fonts are returned by default with AFM fonts as an option. """ fontfiles = set() fontexts = get_fontext_synonyms(fontext) if fontpaths is None: if sys.platform == 'win32': installed_fonts = _get_win32_installed_fonts() fontpaths = MSUserFontDirectories + [win32FontDirectory()] else: installed_fonts = _get_fontconfig_fonts() if sys.platform == 'darwin': fontpaths = [*X11FontDirectories, *OSXFontDirectories] else: fontpaths = X11FontDirectories fontfiles.update(str(path) for path in installed_fonts if path.suffix.lower()[1:] in fontexts) elif isinstance(fontpaths, str): fontpaths = [fontpaths] for path in fontpaths: fontfiles.update(map(os.path.abspath, list_fonts(path, fontexts))) return [fname for fname in fontfiles if os.path.exists(fname)] FontEntry = dataclasses.make_dataclass( 'FontEntry', [ ('fname', str, dataclasses.field(default='')), ('name', str, dataclasses.field(default='')), ('style', str, dataclasses.field(default='normal')), ('variant', str, dataclasses.field(default='normal')), ('weight', str, dataclasses.field(default='normal')), ('stretch', str, dataclasses.field(default='normal')), ('size', str, dataclasses.field(default='medium')), ], namespace={ '__doc__': """ A class for storing Font properties. It is used when populating the font lookup dictionary. """}) def ttfFontProperty(font): """ Extract information from a TrueType font file. Parameters ---------- font : `.FT2Font` The TrueType font file from which information will be extracted. Returns ------- `FontEntry` The extracted font properties. """ name = font.family_name # Styles are: italic, oblique, and normal (default) sfnt = font.get_sfnt() mac_key = (1, # platform: macintosh 0, # id: roman 0) # langid: english ms_key = (3, # platform: microsoft 1, # id: unicode_cs 0x0409) # langid: english_united_states # These tables are actually mac_roman-encoded, but mac_roman support may be # missing in some alternative Python implementations and we are only going # to look for ASCII substrings, where any ASCII-compatible encoding works # - or big-endian UTF-16, since important Microsoft fonts use that. 
sfnt2 = (sfnt.get((*mac_key, 2), b'').decode('latin-1').lower() or sfnt.get((*ms_key, 2), b'').decode('utf_16_be').lower()) sfnt4 = (sfnt.get((*mac_key, 4), b'').decode('latin-1').lower() or sfnt.get((*ms_key, 4), b'').decode('utf_16_be').lower()) if sfnt4.find('oblique') >= 0: style = 'oblique' elif sfnt4.find('italic') >= 0: style = 'italic' elif sfnt2.find('regular') >= 0: style = 'normal' elif font.style_flags & ft2font.ITALIC: style = 'italic' else: style = 'normal' # Variants are: small-caps and normal (default) # !!!! Untested if name.lower() in ['capitals', 'small-caps']: variant = 'small-caps' else: variant = 'normal' # The weight-guessing algorithm is directly translated from fontconfig # 2.13.1's FcFreeTypeQueryFaceInternal (fcfreetype.c). wws_subfamily = 22 typographic_subfamily = 16 font_subfamily = 2 styles = [ sfnt.get((*mac_key, wws_subfamily), b'').decode('latin-1'), sfnt.get((*mac_key, typographic_subfamily), b'').decode('latin-1'), sfnt.get((*mac_key, font_subfamily), b'').decode('latin-1'), sfnt.get((*ms_key, wws_subfamily), b'').decode('utf-16-be'), sfnt.get((*ms_key, typographic_subfamily), b'').decode('utf-16-be'), sfnt.get((*ms_key, font_subfamily), b'').decode('utf-16-be'), ] styles = [*filter(None, styles)] or [font.style_name] def get_weight(): # From fontconfig's FcFreeTypeQueryFaceInternal. # OS/2 table weight. os2 = font.get_sfnt_table("OS/2") if os2 and os2["version"] != 0xffff: return os2["usWeightClass"] # PostScript font info weight. try: ps_font_info_weight = ( font.get_ps_font_info()["weight"].replace(" ", "") or "") except ValueError: pass else: for regex, weight in _weight_regexes: if re.fullmatch(regex, ps_font_info_weight, re.I): return weight # Style name weight. for style in styles: style = style.replace(" ", "") for regex, weight in _weight_regexes: if re.search(regex, style, re.I): return weight if font.style_flags & ft2font.BOLD: return 700 # "bold" return 500 # "medium", not "regular"! weight = int(get_weight()) # Stretch can be absolute and relative # Absolute stretches are: ultra-condensed, extra-condensed, condensed, # semi-condensed, normal, semi-expanded, expanded, extra-expanded, # and ultra-expanded. # Relative stretches are: wider, narrower # Child value is: inherit if any(word in sfnt4 for word in ['narrow', 'condensed', 'cond']): stretch = 'condensed' elif 'demi cond' in sfnt4: stretch = 'semi-condensed' elif any(word in sfnt4 for word in ['wide', 'expanded', 'extended']): stretch = 'expanded' else: stretch = 'normal' # Sizes can be absolute and relative. # Absolute sizes are: xx-small, x-small, small, medium, large, x-large, # and xx-large. # Relative sizes are: larger, smaller # Length value is an absolute font size, e.g., 12pt # Percentage values are in 'em's. Most robust specification. if not font.scalable: raise NotImplementedError("Non-scalable fonts are not supported") size = 'scalable' return FontEntry(font.fname, name, style, variant, weight, stretch, size) def afmFontProperty(fontpath, font): """ Extract information from an AFM font file. Parameters ---------- font : `.AFM` The AFM font file from which information will be extracted. Returns ------- `FontEntry` The extracted font properties. """ name = font.get_familyname() fontname = font.get_fontname().lower() # Styles are: italic, oblique, and normal (default) if font.get_angle() != 0 or 'italic' in name.lower(): style = 'italic' elif 'oblique' in name.lower(): style = 'oblique' else: style = 'normal' # Variants are: small-caps and normal (default) # !!!! 
Untested if name.lower() in ['capitals', 'small-caps']: variant = 'small-caps' else: variant = 'normal' weight = font.get_weight().lower() if weight not in weight_dict: weight = 'normal' # Stretch can be absolute and relative # Absolute stretches are: ultra-condensed, extra-condensed, condensed, # semi-condensed, normal, semi-expanded, expanded, extra-expanded, # and ultra-expanded. # Relative stretches are: wider, narrower # Child value is: inherit if 'demi cond' in fontname: stretch = 'semi-condensed' elif any(word in fontname for word in ['narrow', 'cond']): stretch = 'condensed' elif any(word in fontname for word in ['wide', 'expanded', 'extended']): stretch = 'expanded' else: stretch = 'normal' # Sizes can be absolute and relative. # Absolute sizes are: xx-small, x-small, small, medium, large, x-large, # and xx-large. # Relative sizes are: larger, smaller # Length value is an absolute font size, e.g., 12pt # Percentage values are in 'em's. Most robust specification. # All AFM fonts are apparently scalable. size = 'scalable' return FontEntry(fontpath, name, style, variant, weight, stretch, size) class FontProperties: """ A class for storing and manipulating font properties. The font properties are the six properties described in the `W3C Cascading Style Sheet, Level 1 <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font specification and *math_fontfamily* for math fonts: - family: A list of font names in decreasing order of priority. The items may include a generic font family name, either 'sans-serif' (default), 'serif', 'cursive', 'fantasy', or 'monospace'. In that case, the actual font to be used will be looked up from the associated rcParam. - style: Either 'normal' (default), 'italic' or 'oblique'. - variant: Either 'normal' (default) or 'small-caps'. - stretch: A numeric value in the range 0-1000 or one of 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal' (default), 'semi-expanded', 'expanded', 'extra-expanded' or 'ultra-expanded'. - weight: A numeric value in the range 0-1000 or one of 'ultralight', 'light', 'normal' (default), 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black'. - size: Either an relative value of 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large' or an absolute font size, e.g., 10 (default). - math_fontfamily: The family of fonts used to render math text; overrides :rc:`mathtext.fontset`. Supported values are the same as the ones supported by :rc:`mathtext.fontset`: 'dejavusans', 'dejavuserif', 'cm', 'stix', 'stixsans' and 'custom'. Alternatively, a font may be specified using the absolute path to a font file, by using the *fname* kwarg. However, in this case, it is typically simpler to just pass the path (as a `pathlib.Path`, not a `str`) to the *font* kwarg of the `.Text` object. The preferred usage of font sizes is to use the relative values, e.g., 'large', instead of absolute font sizes, e.g., 12. This approach allows all text sizes to be made larger or smaller based on the font manager's default font size. This class will also accept a fontconfig_ pattern_, if it is the only argument provided. This support does not depend on fontconfig; we are merely borrowing its pattern syntax for use here. .. _fontconfig: https://www.freedesktop.org/wiki/Software/fontconfig/ .. 
_pattern: https://www.freedesktop.org/software/fontconfig/fontconfig-user.html Note that Matplotlib's internal font manager and fontconfig use a different algorithm to lookup fonts, so the results of the same pattern may be different in Matplotlib than in other applications that use fontconfig. """ def __init__(self, family=None, style=None, variant=None, weight=None, stretch=None, size=None, fname=None, # if set, it's a hardcoded filename to use math_fontfamily=None): self._family = _normalize_font_family(rcParams['font.family']) self._slant = rcParams['font.style'] self._variant = rcParams['font.variant'] self._weight = rcParams['font.weight'] self._stretch = rcParams['font.stretch'] self._size = rcParams['font.size'] self._file = None self.set_math_fontfamily(math_fontfamily) if isinstance(family, str): # Treat family as a fontconfig pattern if it is the only # parameter provided. if (style is None and variant is None and weight is None and stretch is None and size is None and fname is None): self.set_fontconfig_pattern(family) return self.set_family(family) self.set_style(style) self.set_variant(variant) self.set_weight(weight) self.set_stretch(stretch) self.set_file(fname) self.set_size(size) @classmethod def _from_any(cls, arg): """ Generic constructor which can build a `.FontProperties` from any of the following: - a `.FontProperties`: it is passed through as is; - `None`: a `.FontProperties` using rc values is used; - an `os.PathLike`: it is used as path to the font file; - a `str`: it is parsed as a fontconfig pattern; - a `dict`: it is passed as ``**kwargs`` to `.FontProperties`. """ if isinstance(arg, cls): return arg elif arg is None: return cls() elif isinstance(arg, os.PathLike): return cls(fname=arg) elif isinstance(arg, str): return cls(arg) else: return cls(**arg) def __hash__(self): l = (tuple(self.get_family()), self.get_slant(), self.get_variant(), self.get_weight(), self.get_stretch(), self.get_size_in_points(), self.get_file(), self.get_math_fontfamily()) return hash(l) def __eq__(self, other): return hash(self) == hash(other) def __str__(self): return self.get_fontconfig_pattern() def get_family(self): """ Return a list of font names that comprise the font family. """ return self._family def get_name(self): """ Return the name of the font that best matches the font properties. """ return get_font(findfont(self)).family_name def get_style(self): """ Return the font style. Values are: 'normal', 'italic' or 'oblique'. """ return self._slant get_slant = get_style def get_variant(self): """ Return the font variant. Values are: 'normal' or 'small-caps'. """ return self._variant def get_weight(self): """ Set the font weight. Options are: A numeric value in the range 0-1000 or one of 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black' """ return self._weight def get_stretch(self): """ Return the font stretch or width. Options are: 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'. """ return self._stretch def get_size(self): """ Return the font size. """ return self._size def get_size_in_points(self): return self._size def get_file(self): """ Return the filename of the associated font. """ return self._file def get_fontconfig_pattern(self): """ Get a fontconfig_ pattern_ suitable for looking up the font as specified with fontconfig's ``fc-match`` utility. 
This support does not depend on fontconfig; we are merely borrowing its pattern syntax for use here. """ return generate_fontconfig_pattern(self) def set_family(self, family): """ Change the font family. May be either an alias (generic name is CSS parlance), such as: 'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace', a real font name or a list of real font names. Real font names are not supported when :rc:`text.usetex` is `True`. """ if family is None: family = rcParams['font.family'] self._family = _normalize_font_family(family) set_name = set_family def set_style(self, style): """ Set the font style. Values are: 'normal', 'italic' or 'oblique'. """ if style is None: style = rcParams['font.style'] _api.check_in_list(['normal', 'italic', 'oblique'], style=style) self._slant = style set_slant = set_style def set_variant(self, variant): """ Set the font variant. Values are: 'normal' or 'small-caps'. """ if variant is None: variant = rcParams['font.variant'] _api.check_in_list(['normal', 'small-caps'], variant=variant) self._variant = variant def set_weight(self, weight): """ Set the font weight. May be either a numeric value in the range 0-1000 or one of 'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black' """ if weight is None: weight = rcParams['font.weight'] try: weight = int(weight) if weight < 0 or weight > 1000: raise ValueError() except ValueError: if weight not in weight_dict: raise ValueError("weight is invalid") self._weight = weight def set_stretch(self, stretch): """ Set the font stretch or width. Options are: 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded' or 'ultra-expanded', or a numeric value in the range 0-1000. """ if stretch is None: stretch = rcParams['font.stretch'] try: stretch = int(stretch) if stretch < 0 or stretch > 1000: raise ValueError() except ValueError as err: if stretch not in stretch_dict: raise ValueError("stretch is invalid") from err self._stretch = stretch def set_size(self, size): """ Set the font size. Either an relative value of 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large' or an absolute font size, e.g., 12. """ if size is None: size = rcParams['font.size'] try: size = float(size) except ValueError: try: scale = font_scalings[size] except KeyError as err: raise ValueError( "Size is invalid. Valid font size are " + ", ".join(map(str, font_scalings))) from err else: size = scale * FontManager.get_default_size() if size < 1.0: _log.info('Fontsize %1.2f < 1.0 pt not allowed by FreeType. ' 'Setting fontsize = 1 pt', size) size = 1.0 self._size = size def set_file(self, file): """ Set the filename of the fontfile to use. In this case, all other properties will be ignored. """ self._file = os.fspath(file) if file is not None else None def set_fontconfig_pattern(self, pattern): """ Set the properties by parsing a fontconfig_ *pattern*. This support does not depend on fontconfig; we are merely borrowing its pattern syntax for use here. """ for key, val in parse_fontconfig_pattern(pattern).items(): if type(val) == list: getattr(self, "set_" + key)(val[0]) else: getattr(self, "set_" + key)(val) def get_math_fontfamily(self): """ Return the name of the font family used for math text. The default font is :rc:`mathtext.fontset`. """ return self._math_fontfamily def set_math_fontfamily(self, fontfamily): """ Set the font family for text in math mode. 
If not set explicitly, :rc:`mathtext.fontset` will be used. Parameters ---------- fontfamily : str The name of the font family. Available font families are defined in the matplotlibrc.template file :ref:`here <customizing-with-matplotlibrc-files>` See Also -------- .text.Text.get_math_fontfamily """ if fontfamily is None: fontfamily = rcParams['mathtext.fontset'] else: valid_fonts = _validators['mathtext.fontset'].valid.values() # _check_in_list() Validates the parameter math_fontfamily as # if it were passed to rcParams['mathtext.fontset'] _api.check_in_list(valid_fonts, math_fontfamily=fontfamily) self._math_fontfamily = fontfamily def copy(self): """Return a copy of self.""" new = type(self)() vars(new).update(vars(self)) return new class _JSONEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, FontManager): return dict(o.__dict__, __class__='FontManager') elif isinstance(o, FontEntry): d = dict(o.__dict__, __class__='FontEntry') try: # Cache paths of fonts shipped with Matplotlib relative to the # Matplotlib data path, which helps in the presence of venvs. d["fname"] = str( Path(d["fname"]).relative_to(mpl.get_data_path())) except ValueError: pass return d else: return super().default(o) def _json_decode(o): cls = o.pop('__class__', None) if cls is None: return o elif cls == 'FontManager': r = FontManager.__new__(FontManager) r.__dict__.update(o) return r elif cls == 'FontEntry': r = FontEntry.__new__(FontEntry) r.__dict__.update(o) if not os.path.isabs(r.fname): r.fname = os.path.join(mpl.get_data_path(), r.fname) return r else: raise ValueError("Don't know how to deserialize __class__=%s" % cls) def json_dump(data, filename): """ Dump `FontManager` *data* as JSON to the file named *filename*. See Also -------- json_load Notes ----- File paths that are children of the Matplotlib data path (typically, fonts shipped with Matplotlib) are stored relative to that data path (to remain valid across virtualenvs). This function temporarily locks the output file to prevent multiple processes from overwriting one another's output. """ with cbook._lock_path(filename), open(filename, 'w') as fh: try: json.dump(data, fh, cls=_JSONEncoder, indent=2) except OSError as e: _log.warning('Could not save font_manager cache {}'.format(e)) def json_load(filename): """ Load a `FontManager` from the JSON file named *filename*. See Also -------- json_dump """ with open(filename, 'r') as fh: return json.load(fh, object_hook=_json_decode) def _normalize_font_family(family): if isinstance(family, str): family = [family] return family class FontManager: """ On import, the `FontManager` singleton instance creates a list of ttf and afm fonts and caches their `FontProperties`. The `FontManager.findfont` method does a nearest neighbor search to find the font that most closely matches the specification. If no good enough match is found, the default font is returned. """ # Increment this version number whenever the font cache data # format or behavior has changed and requires a existing font # cache files to be rebuilt. __version__ = 330 def __init__(self, size=None, weight='normal'): self._version = self.__version__ self.__default_weight = weight self.default_size = size # Create list of font paths. paths = [cbook._get_data_path('fonts', subdir) for subdir in ['ttf', 'afm', 'pdfcorefonts']] _log.debug('font search path %s', str(paths)) self.defaultFamily = { 'ttf': 'DejaVu Sans', 'afm': 'Helvetica'} self.afmlist = [] self.ttflist = [] # Delay the warning by 5s. 
timer = Timer(5, lambda: _log.warning( 'Matplotlib is building the font cache; this may take a moment.')) timer.start() try: for fontext in ["afm", "ttf"]: for path in [*findSystemFonts(paths, fontext=fontext), *findSystemFonts(fontext=fontext)]: try: self.addfont(path) except OSError as exc: _log.info("Failed to open font file %s: %s", path, exc) except Exception as exc: _log.info("Failed to extract font properties from %s: " "%s", path, exc) finally: timer.cancel() def addfont(self, path): """ Cache the properties of the font at *path* to make it available to the `FontManager`. The type of font is inferred from the path suffix. Parameters ---------- path : str or path-like """ if Path(path).suffix.lower() == ".afm": with open(path, "rb") as fh: font = afm.AFM(fh) prop = afmFontProperty(path, font) self.afmlist.append(prop) else: font = ft2font.FT2Font(path) prop = ttfFontProperty(font) self.ttflist.append(prop) @property def defaultFont(self): # Lazily evaluated (findfont then caches the result) to avoid including # the venv path in the json serialization. return {ext: self.findfont(family, fontext=ext) for ext, family in self.defaultFamily.items()} def get_default_weight(self): """ Return the default font weight. """ return self.__default_weight @staticmethod def get_default_size(): """ Return the default font size. """ return rcParams['font.size'] def set_default_weight(self, weight): """ Set the default font weight. The initial value is 'normal'. """ self.__default_weight = weight @staticmethod def _expand_aliases(family): if family in ('sans', 'sans serif'): family = 'sans-serif' return rcParams['font.' + family] # Each of the scoring functions below should return a value between # 0.0 (perfect match) and 1.0 (terrible match) def score_family(self, families, family2): """ Return a match score between the list of font families in *families* and the font family name *family2*. An exact match at the head of the list returns 0.0. A match further down the list will return between 0 and 1. No match will return 1.0. """ if not isinstance(families, (list, tuple)): families = [families] elif len(families) == 0: return 1.0 family2 = family2.lower() step = 1 / len(families) for i, family1 in enumerate(families): family1 = family1.lower() if family1 in font_family_aliases: options = [*map(str.lower, self._expand_aliases(family1))] if family2 in options: idx = options.index(family2) return (i + (idx / len(options))) * step elif family1 == family2: # The score should be weighted by where in the # list the font was found. return i * step return 1.0 def score_style(self, style1, style2): """ Return a match score between *style1* and *style2*. An exact match returns 0.0. A match between 'italic' and 'oblique' returns 0.1. No match returns 1.0. """ if style1 == style2: return 0.0 elif (style1 in ('italic', 'oblique') and style2 in ('italic', 'oblique')): return 0.1 return 1.0 def score_variant(self, variant1, variant2): """ Return a match score between *variant1* and *variant2*. An exact match returns 0.0, otherwise 1.0. """ if variant1 == variant2: return 0.0 else: return 1.0 def score_stretch(self, stretch1, stretch2): """ Return a match score between *stretch1* and *stretch2*. The result is the absolute value of the difference between the CSS numeric values of *stretch1* and *stretch2*, normalized between 0.0 and 1.0. 
""" try: stretchval1 = int(stretch1) except ValueError: stretchval1 = stretch_dict.get(stretch1, 500) try: stretchval2 = int(stretch2) except ValueError: stretchval2 = stretch_dict.get(stretch2, 500) return abs(stretchval1 - stretchval2) / 1000.0 def score_weight(self, weight1, weight2): """ Return a match score between *weight1* and *weight2*. The result is 0.0 if both weight1 and weight 2 are given as strings and have the same value. Otherwise, the result is the absolute value of the difference between the CSS numeric values of *weight1* and *weight2*, normalized between 0.05 and 1.0. """ # exact match of the weight names, e.g. weight1 == weight2 == "regular" if cbook._str_equal(weight1, weight2): return 0.0 w1 = weight1 if isinstance(weight1, Number) else weight_dict[weight1] w2 = weight2 if isinstance(weight2, Number) else weight_dict[weight2] return 0.95 * (abs(w1 - w2) / 1000) + 0.05 def score_size(self, size1, size2): """ Return a match score between *size1* and *size2*. If *size2* (the size specified in the font file) is 'scalable', this function always returns 0.0, since any font size can be generated. Otherwise, the result is the absolute distance between *size1* and *size2*, normalized so that the usual range of font sizes (6pt - 72pt) will lie between 0.0 and 1.0. """ if size2 == 'scalable': return 0.0 # Size value should have already been try: sizeval1 = float(size1) except ValueError: sizeval1 = self.default_size * font_scalings[size1] try: sizeval2 = float(size2) except ValueError: return 1.0 return abs(sizeval1 - sizeval2) / 72 def findfont(self, prop, fontext='ttf', directory=None, fallback_to_default=True, rebuild_if_missing=True): """ Find a font that most closely matches the given font properties. Parameters ---------- prop : str or `~matplotlib.font_manager.FontProperties` The font properties to search for. This can be either a `.FontProperties` object or a string defining a `fontconfig patterns`_. fontext : {'ttf', 'afm'}, default: 'ttf' The extension of the font file: - 'ttf': TrueType and OpenType fonts (.ttf, .ttc, .otf) - 'afm': Adobe Font Metrics (.afm) directory : str, optional If given, only search this directory and its subdirectories. fallback_to_default : bool If True, will fallback to the default font family (usually "DejaVu Sans" or "Helvetica") if the first lookup hard-fails. rebuild_if_missing : bool Whether to rebuild the font cache and search again if the first match appears to point to a nonexisting font (i.e., the font cache contains outdated entries). Returns ------- str The filename of the best matching font. Notes ----- This performs a nearest neighbor search. Each font is given a similarity score to the target font properties. The first font with the highest score is returned. If no matches below a certain threshold are found, the default font (usually DejaVu Sans) is returned. The result is cached, so subsequent lookups don't have to perform the O(n) nearest neighbor search. See the `W3C Cascading Style Sheet, Level 1 <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation for a description of the font finding algorithm. .. _fontconfig patterns: https://www.freedesktop.org/software/fontconfig/fontconfig-user.html """ # Pass the relevant rcParams (and the font manager, as `self`) to # _findfont_cached so to prevent using a stale cache entry after an # rcParam was changed. 
rc_params = tuple(tuple(rcParams[key]) for key in [ "font.serif", "font.sans-serif", "font.cursive", "font.fantasy", "font.monospace"]) return self._findfont_cached( prop, fontext, directory, fallback_to_default, rebuild_if_missing, rc_params) @lru_cache() def _findfont_cached(self, prop, fontext, directory, fallback_to_default, rebuild_if_missing, rc_params): prop = FontProperties._from_any(prop) fname = prop.get_file() if fname is not None: return fname if fontext == 'afm': fontlist = self.afmlist else: fontlist = self.ttflist best_score = 1e64 best_font = None _log.debug('findfont: Matching %s.', prop) for font in fontlist: if (directory is not None and Path(directory) not in Path(font.fname).parents): continue # Matching family should have top priority, so multiply it by 10. score = (self.score_family(prop.get_family(), font.name) * 10 + self.score_style(prop.get_style(), font.style) + self.score_variant(prop.get_variant(), font.variant) + self.score_weight(prop.get_weight(), font.weight) + self.score_stretch(prop.get_stretch(), font.stretch) + self.score_size(prop.get_size(), font.size)) _log.debug('findfont: score(%s) = %s', font, score) if score < best_score: best_score = score best_font = font if score == 0: break if best_font is None or best_score >= 10.0: if fallback_to_default: _log.warning( 'findfont: Font family %s not found. Falling back to %s.', prop.get_family(), self.defaultFamily[fontext]) for family in map(str.lower, prop.get_family()): if family in font_family_aliases: _log.warning( "findfont: Generic family %r not found because " "none of the following families were found: %s", family, ", ".join(self._expand_aliases(family))) default_prop = prop.copy() default_prop.set_family(self.defaultFamily[fontext]) return self.findfont(default_prop, fontext, directory, fallback_to_default=False) else: raise ValueError(f"Failed to find font {prop}, and fallback " f"to the default font was disabled") else: _log.debug('findfont: Matching %s to %s (%r) with score of %f.', prop, best_font.name, best_font.fname, best_score) result = best_font.fname if not os.path.isfile(result): if rebuild_if_missing: _log.info( 'findfont: Found a missing font file. Rebuilding cache.') new_fm = _load_fontmanager(try_read_cache=False) # Replace self by the new fontmanager, because users may have # a reference to this specific instance. # TODO: _load_fontmanager should really be (used by) a method # modifying the instance in place. vars(self).update(vars(new_fm)) return self.findfont( prop, fontext, directory, rebuild_if_missing=False) else: raise ValueError("No valid font could be found") return _cached_realpath(result) @lru_cache() def is_opentype_cff_font(filename): """ Return whether the given font is a Postscript Compact Font Format Font embedded in an OpenType wrapper. Used by the PostScript and PDF backends that can not subset these fonts. """ if os.path.splitext(filename)[1].lower() == '.otf': with open(filename, 'rb') as fd: return fd.read(4) == b"OTTO" else: return False @lru_cache(64) def _get_font(filename, hinting_factor, *, _kerning_factor, thread_id): return ft2font.FT2Font( filename, hinting_factor, _kerning_factor=_kerning_factor) # FT2Font objects cannot be used across fork()s because they reference the same # FT_Library object. While invalidating *all* existing FT2Fonts after a fork # would be too complicated to be worth it, the main way FT2Fonts get reused is # via the cache of _get_font, which we can empty upon forking (in Py3.7+). 
if hasattr(os, "register_at_fork"): os.register_at_fork(after_in_child=_get_font.cache_clear) def get_font(filename, hinting_factor=None): # Resolving the path avoids embedding the font twice in pdf/ps output if a # single font is selected using two different relative paths. filename = _cached_realpath(filename) if hinting_factor is None: hinting_factor = rcParams['text.hinting_factor'] # also key on the thread ID to prevent segfaults with multi-threading return _get_font(filename, hinting_factor, _kerning_factor=rcParams['text.kerning_factor'], thread_id=threading.get_ident()) def _load_fontmanager(*, try_read_cache=True): fm_path = Path( mpl.get_cachedir(), f"fontlist-v{FontManager.__version__}.json") if try_read_cache: try: fm = json_load(fm_path) except Exception: pass else: if getattr(fm, "_version", object()) == FontManager.__version__: _log.debug("Using fontManager instance from %s", fm_path) return fm fm = FontManager() json_dump(fm, fm_path) _log.info("generated new fontManager") return fm fontManager = _load_fontmanager() findfont = fontManager.findfont
PypiClean
/Lokai-0.3.tar.gz/Lokai-0.3/lokai/tool_box/tb_job_manager/job_environment.py
#-----------------------------------------------------------------------
import sys
import os
import inspect
import types
import datetime
import yaml
import logging

import lokai.tool_box.tb_common.notification as notify
from lokai.tool_box.tb_common.email_address_list import EmailAddressList
from lokai.tool_box.tb_common.dates import strtotime, timetostr, now
import lokai.tool_box.tb_common.magic_file as magic
import lokai.tool_box.tb_common.file_handling as file_handling

#-----------------------------------------------------------------------

""" File Meta Data
    ==============

    Files are able to store meta data as part of the file name. This
    recognises that the file name is key to the operation of the job
    system in that each request for activation of a job must have a
    different filename, otherwise one request might be overwritten by
    another. The implication is that the filename has meaning. The
    presence of meta data in the filename gives the application a
    formal structure to use for distinguishing requests.

    The meta data is used by the job environment object to manage
    automatic re-queueing of jobs using the pending queue.

    Meta data is stored in the filename as a yaml string enclosed in
    '{}' just before the file extension, if any. This works fine so
    long as the dictionary only contains scalar values. There is also
    an implementation limitation that the meta data itself cannot
    contain '{'.
"""

def get_file_meta(file_name):
    """ given a file path, return dictionary:

        {directory, file_base, extension, meta_string}
    """
    directory, basename = os.path.split(file_name)
    file_plus, extension = os.path.splitext(basename)
    file_base = file_plus
    meta_string = ''
    meta_values = {}
    if file_plus[-1] == '}':
        # we have meta data
        for ptr in range(len(file_plus)-1, 0, -1):
            if file_plus[ptr] == '{':
                file_base = file_plus[:ptr]
                meta_string = file_plus[ptr:]
                meta_values = yaml.load(meta_string)
                break
    return {'directory': directory,
            'file_base': file_base,
            'extension': extension,
            'meta_string': meta_string,
            'meta_values': meta_values,
            'file_name': basename}

def put_file_meta(file_directory, given_meta_values={}):
    """ given a path and meta data in the form of a dictionary (see
        above), return a full path and revised filename in a new
        dictionary.

        {directory, file_base, extension, meta_string, meta_values,
         full_path, file_plus_extn}
    """
    meta_values = ((given_meta_values is not None and given_meta_values)
                   or {})
    new_meta_values = {}
    new_meta_values.update(file_directory['meta_values'])
    new_meta_values.update(meta_values)
    meta_string = ''
    if new_meta_values:
        meta_string = yaml.dump(new_meta_values,
                                default_flow_style=True).strip()
        file_plus_extn = "%s%s%s"% (file_directory['file_base'],
                                    meta_string,
                                    file_directory['extension'])
    else:
        file_plus_extn = "%s%s"% (file_directory['file_base'],
                                  file_directory['extension'])
    result = {}
    result.update(file_directory)
    result['meta_string'] = meta_string
    result['meta_values'] = meta_values
    result['file_name'] = file_plus_extn
    result['full_path'] = os.path.join(file_directory['directory'],
                                       file_plus_extn)
    return result

#-----------------------------------------------------------------------

class JobEnvironment(object):
    """ A JobEnvironment provides the basic tools that allow batch jobs
        to manage their own processes.

        The JobEnvironment object deals with the identification and
        management of input and output files. The invocation of an
        application as a job instance can be done using cron, at, or
        any other scheduling process.
        Equally, any standard application patterns (file loops,
        logging, error handling and so on) would be handled by higher
        level objects using these tools.

        Environment
        ===========

        An application is assumed to manage its communication with the
        outside world using a set of input and output directories.

        Input:

            Immediate:

                One or more input directories can be set up. Each input
                directory may contain a further set of sub-directories.
                JobEnvironment identifies the 'first' actual file in
                this directory tree and presents it to the application
                for processing. The file may simply indicate 'start
                processing', it may define a set of parameters for the
                process, or it may be a file of data to process.

                The normal assumption would be that no input means no
                work to be done. Applications that monitor external
                events (dates, web-site status and so on) may choose to
                run without an input file trigger.

                The input file is opened using a MagicFile object so
                that, when the file is closed, it is moved to a
                'processed' directory (see below).

            Pending:

                A directory structure where the immediate sub-directory
                defines a date and time. This structure is not explored
                for files until after the given date/time. Thus it is
                possible to queue actions into the future.

        Output:

            Immediate:

                One or more output directories can be set up. A single
                output file (opened using a MagicFile object) is
                duplicated under each of the output directories. This
                provides fan-out so that the completion of one
                application can trigger one or more follow-on
                processes.

                The output file can be opened with a relative path.
                This path is preserved underneath each of the output
                directories.

            Processed:

                One or more 'processed' directories can be set up. An
                input file, opened using a MagicFile object, is moved
                to a processed directory when the file is closed. If
                there is more than one such directory the file is
                duplicated into each of them. This provides fan-out so
                that the completion of one application can trigger one
                or more follow-on processes.

                The sub-directory structure from the input is
                maintained in this rename process. Thus, if the input
                is ``source_path/sub_1/my_file`` this will be placed
                into ``processed_path/sub_1/my_file``.

            Error:

                A single error directory can be set up. If the
                application detects an error (and calls the correct
                close function) the input file is moved to this error
                directory. This has the effect of removing the file
                from the input, so it is not reprocessed, and, at the
                same time, isolating the problem file so that it can be
                dealt with.

                The sub-directory structure from the input is
                maintained in this rename process. Thus, if the input
                is ``source_path/sub_1/my_file`` this will be placed
                into ``error_path/sub_1/my_file``.

        File Locks
        ==========

        It is potentially possible for two processes to access the same
        input directory at the same time, possibly by design, more
        often because the process has been started by cron before the
        previous instance has finished. Consequently, it is possible
        for two processes to attempt to read the same file.

        The solution is to use a lock file. The job environment
        automatically creates a hidden file (name starting with '.') in
        the directory where the file is, using the base name of the
        file. This is then removed on close.

        In the case that the input directory is read-only, the job
        environment can be given a lock directory as an alternative.
        This can lead to duplicate lock file names for different input
        files (from different sub-directories). This is not a real
        problem, however.
        The wrongly locked input file will be processed at the next
        run.

        The lock file processing only works if the job environment
        methods are used. File locking is not handled by the MagicFile
        object.

        Output files are not locked. The operation of the MagicFile
        object means that the file does not appear in the output
        directory until it is complete.

        Parameter File
        ==============

        The setting of input and output directories is done using one
        or more Python executable parameter files. The parameter files
        detailed here handle job management only. System wide
        parameters, such as database connections, for example, must be
        handled in some other way.

        There is a single parameter file that contains default values
        for various locations. By default the general parameter file is
        called job_environment.conf and is found in the current working
        directory. The file does not have to exist.

        Each application has its own parameter file defining the
        details for that application. By default the parameter file has
        the same name as the application (without the path and
        extension). Thus, the application ``path/to/my_app.py`` has a
        parameter file called ``my_app``. By default, also, the
        parameter files are found in a directory ``job_environment.d``
        in the current working directory.

        The locations of both the general and application specific
        parameter files can be given to JobEnvironment on
        instantiation.

        The general parameter file can also contain the name of a
        directory that contains the application files. This is most
        useful when you need, for example, to run the same application
        set over different data sets in parallel. The general
        parameters can point to the data sets and the application
        specific files can be shared between the two environments.

        Parameter settings:

            app_param_path
                A path, absolute or relative to the current working
                directory, to the directory containing the application
                parameter files. Only meaningful in the general
                parameter file.

            environment_path
                A path, absolute or relative to the current working
                directory, that defines the processing environment. All
                other relative paths in the settings below are relative
                to this value.

            source_path
                A single path, or a list of paths. This path is
                searched for input files.

            pending_path
                A single path. Timed queue entries are represented by
                sub-directories immediately below this path.

            processed_path
                A path, or list of paths. Input files are moved to this
                path (or duplicated to all paths in the list) when
                closed.

            error_path
                A single path. Input files are moved to this path when
                closed.

            output_path
                A path or a list of paths. Files opened for output are
                moved to here on close.

            temporary_path
                A single path. Files opened for output are kept here
                while they are open.

            lock_file_location
                A path to a directory where the application specific
                lock-file will be created.

        Parameters may also be overridden on object instantiation.
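
        Example
        =======

        For illustration only (the directory names here are
        hypothetical, not package defaults), an application parameter
        file such as ``job_environment.d/my_app`` is plain Python and
        might contain:

            environment_path = 'data/my_app'
            source_path = ['input', 'priority_input']
            output_path = ['output', 'archive']
            processed_path = 'processed'
            error_path = 'error'
            temporary_path = 'temp'

        Relative paths other than environment_path are joined onto
        environment_path when the JobEnvironment object is created.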
""" def __init__(self, general_param_file=None, app_param_path = None, app_param_file=None, environment_path=None, source_path=None, pending_path=None, processed_path=None, error_path=None, output_path=None, temporary_path=None, lock_file_location=None, application_name=None, lock_file_ignore=False, ): # Defaults self.application_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] self.application_name = application_name or self.application_name self.general_param_file = general_param_file or 'job_environment.conf' self.app_param_path = 'job_environment.d' self.environment_path = self.application_name self.source_path = 'input' self.pending_path = 'pending' self.processed_path = 'processed' self.error_path = 'error' self.output_path = 'output' self.temporary_path = 'temp' self.lock_file_ignore = lock_file_ignore # get the general file if it exists else rely on default if os.path.exists(self.general_param_file): self._read_param_file(self.general_param_file) self.app_param_path = app_param_path or self.app_param_path self.app_param_file = app_param_file or self.application_name # get the application file self._read_param_file( os.path.join(self.app_param_path, self.app_param_file)) # Override using the rest of the init arguments self.environment_path = environment_path or self.environment_path self.source_path = source_path or self.source_path self.pending_path = pending_path or self.pending_path self.processed_path = processed_path or self.processed_path self.error_path = error_path or self.error_path self.output_path = output_path or self.output_path self.temporary_path = temporary_path or self.temporary_path self.lock_file_location = lock_file_location # make lists where appropriate if isinstance(self.source_path, types.StringTypes): self.source_path = [self.source_path] if isinstance(self.output_path, types.StringTypes): self.output_path = [self.output_path] if isinstance(self.processed_path, types.StringTypes): self.processed_path = [self.processed_path] # process paths to locate in the environment if self.source_path: self.source_path = [os.path.join(self.environment_path, dd) for dd in self.source_path] if self.output_path: self.output_path = [os.path.join(self.environment_path, dd) for dd in self.output_path] if self.processed_path: self.processed_path = [os.path.join(self.environment_path, dd) for dd in self.processed_path] if self.pending_path: self.pending_path = os.path.join(self.environment_path, self.pending_path) if self.error_path: self.error_path = os.path.join(self.environment_path, self.error_path) if self.temporary_path: self.temporary_path = os.path.join(self.environment_path, self.temporary_path) if self.lock_file_location: self.lock_file_location = os.path.join(self.environment_path, self.lock_file_location) # Control stuff self.current_source_name = None self.current_source_object= None self.current_target_name = None self.current_target_object = None self.current_lock_object = None self.current_lock_fd = None self.current_lock_name = None def _read_param_file(self, file_path): """ Read and execute the given file. Transfer result to our attributes. Avoid overwriting our methods. 
""" dummy_global = {} argset = {} execfile(file_path, dummy_global, argset) for key, value in argset.iteritems(): try: obj = getattr(self, key) if inspect.ismethod(obj): continue setattr(self, key, value) except AttributeError: setattr(self, key, value) #------------------------------------------------------------------- # Input stuff def get_lock(self, candidate_source, file_path): file_head, file_name = os.path.split( os.path.join(candidate_source, file_path)) lock_head = self.lock_file_location or file_head lock_name = '.LCK.'+file_name lock_path = os.path.join(lock_head, lock_name) if (self.lock_file_ignore and os.path.isfile(lock_path)): os.remove(lock_path) try: self.current_lock_fd = os.open(lock_path, os.O_CREAT+os.O_EXCL) self.current_lock_name = lock_path return True except OSError, message: if str(message).startswith('[Errno 17] File exists'): return False else: raise def clear_lock(self): if self.current_lock_fd: os.close(self.current_lock_fd) os.remove(self.current_lock_name) self.current_lock_fd = None self.current_lock_name = None def open_from_source(self, disposition='k', reverse=False): """ not quite an iterator to provide the next available file. The search is repeated from the top every time so that new entries are found as soon as possible. See magic_file for interpretaion of ``disposition`` """ if self.current_source_object is not None: self.current_source_object.close() self.current_source_object = None for candidate_source in self.source_path: for result in ( file_handling.ordered_walk(candidate_source, reverse=reverse)): if os.path.basename(result).startswith('.'): # Ignore hidden (unix) files continue self.current_source_name = result if not self.get_lock(candidate_source, result): continue self.current_source_object = magic.MagicFile( self.current_source_name, candidate_source, self.processed_path, 'r', disposition=disposition, ) return self.current_source_object #>>>>>>>>>>>>>>>>>>>> if self.pending_path and os.path.exists(self.pending_path): pending_set = os.listdir(self.pending_path) pending_set.sort() for pending in pending_set: pending_time = strtotime(pending) if pending_time < now(): input_path = os.path.abspath( os.path.join(self.pending_path, pending)) for result in ( file_handling.ordered_walk(input_path)): if os.path.basename(result).startswith('.'): # Ignore hidden (unix) files continue self.current_source_name = result if not self.get_lock(candidate_source, result): continue self.current_source_object = ( magic.MagicFile( self.current_source_name, input_path, self.processed_path, 'r', disposition=disposition, ) ) return self.current_source_object #>>>>>>>>>>>>>>>>>>>> return None def input_set(self, disposition='k', reverse=False): """ Iterator for open_from_source """ op = self.open_from_source(disposition, reverse=reverse) while op: yield op op = self.open_from_source(disposition, reverse=reverse) def close_ok(self): """ Close the current input in the normal way """ if self.current_source_object is None: return #>>>>>>>>>>>>>>>>>>>> self.current_source_object.close() self.current_source_object = None self.clear_lock() def close_error(self): """ Close the input after an error. The input is renamed to the error path, if given, or the processed path otherwise. 
""" if self.current_source_object is None: return #>>>>>>>>>>>>>>>>>>>> if hasattr(self.current_source_object, 'set_rename_target'): self.current_source_object.set_rename_target( directory=self.error_path) self.current_source_object.close() self.current_source_object = None self.clear_lock() def _get_delay_source(self, delay): """ Construct a directory name for a delay queue. Create the directory if needed. """ if self.pending_path: # Calculate name of delay directory delay_source = timetostr( now()+datetime.timedelta(seconds=delay), "%Y%m%d%H%M") delay_path = os.path.join(self.pending_path, delay_source) if not os.path.exists(delay_path): os.makedirs(delay_path) return delay_path return None def close_pending(self, delay): """ Close the input and re-queue it for execution after the given number of seconds delay. The file name is given meta data with a 'repeat' count so that the application can keep track of how many times this gets done. """ if self.current_source_object is None: return #>>>>>>>>>>>>>>>>>>>> if hasattr(self.current_source_object, 'set_rename_target'): delay_source = self._get_delay_source(delay) if delay_source: # Add repeat count meta data file_detail = get_file_meta(self.current_source_name) meta_count = file_detail['meta_values'].get('repeat', 0) meta_count += 1 new_file_detail = put_file_meta(file_detail, {'repeat': meta_count}) self.current_source_object.set_rename_target( name=new_file_detail['file_name'], directory=delay_source) self.current_source_object.close() self.current_source_object = None self.clear_lock() def post_pending(self, future_date, file_name, file_content): """ Create a new file in an input directory for this process. future_date = the date after which the file should be used. Can be None or empty, in which case future date is today. file_name = the name of the file to create. file_content = some text to put into the file. """ use_date = future_date if not future_date: use_date = now() control_source = timetostr(use_date, "%Y%m%d%H%M") control_path = os.path.join(self.pending_path, control_source) if not os.path.exists(control_path): os.makedirs(control_path) file_path = os.path.join(control_path, file_name) fp = open(file_path, 'w') fp.write(file_content) fp.close() def get_base_input_name(self): if self.current_source_name: return os.path.basename(self.current_source_name) else: return None #------------------------------------------------------------------- # Output stuff def open_output(self, file_name, disposition='k'): """ Open the given file (relative path) for output. It will appear in the output directory when closed. See magic_file for interpretaion of ``disposition`` """ self.close_output() # with no opportunity for rollback self.current_target_object = magic.MagicFile( file_name, self.temporary_path, self.output_path, 'w', disposition=disposition, ) return self.current_target_object def close_output(self, delete=False): """ Close the current output file. Set delete=True to delete the output. """ if self.current_target_object is not None: self.current_target_object.close(delete=delete) self.current_target_object = None def set_output_rename_target(self, name=None, directory=None): """ See magic_file set_rename_target """ if self.current_target_object is not None: self.current_target_object.set_rename_target(name, directory) def output_rollback(self): """ Abandon any output by closing the file and then deleting it. This does not work for appending to existing files! 
""" if self.current_target_object is None: return #>>>>>>>>>>>>>>>>>>>> self.close_output(delete=True) def output_commit(self): """ Close the output file in the normal way. """ self.close_output() #------------------------------------------------------------------- # Lock file def get_lock_file(self): """ Return the path to the lock file for this application """ return os.path.join(self.lock_file_location, self.application_name) def open_lock(self, force=False): """ Grab the lock. Delete any previous lock if force is True """ lock_file = self.get_lock_file() if force: self.close_lock() try: self.current_lock_object = os.open(lock_file, os.O_CREAT+os.O_EXCL) return True except: return False def close_lock(self): """ Close any open lock """ if self.current_lock_object: os.close(self.current_lock_object) lock_file = self.get_lock_file() if os.path.isfile(lock_file): os.remove(lock_file) #----------------------------------------------------------------------- def setLoggers(verbosity=0, log_file=None, process_id='unspecified', email_from=None, email_to=None, email_subject_line=None, criticality_level=logging.CRITICAL, critical_to=None, critical_subject=None, ): """ Batch jobs need logging. setLoggers is a tool to initialise logging in an application. This set-up includes creating a logger channel using ``process_id`` as the name. Applications should use:: ``from notification import (critical, error, warning, info, debug)`` Then, passing a log message back through the appropriate one of these routines will do the right thing. Logging may be set up to use an email logger (see tool_box.tb_common.notification). For email delivery, logging assumes that reports with a logging level higher than some value may be different from lower priority reports. This is reflected in the options that can be given. High level reports go to 'critical_to' Low level reports go to 'email_to' - high level reports do _not_ got to this email. Other handlers can be added as required. This is not a critical part of the job environment. There are many different ways to do this. ymmv. 
""" if process_id != 'unspecified': notify.setLogName(process_id) verbosity_dict = { 0 : logging.ERROR, 1 : logging.WARNING, 2 : logging.INFO, 3 : logging.DEBUG} level = verbosity_dict.get(min(3, verbosity), logging.ERROR) logging.basicConfig(level=logging.ERROR, stream=sys.stderr) this_logger = logging.getLogger(notify.getLogName()) this_logger.setLevel(level) if log_file: # Output to designated file handler = logging.FileHandler(log_file) else: # Output to console instead handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT)) logging.getLogger(notify.getLogName()).addHandler(handler) if email_from: if critical_to: mail_target = notify.checkingSMTPHandler( 'localhost', email_from, EmailAddressList(critical_to).as_list, critical_subject, ) mail_formatter = notify.BulkFormatter( linefmt = logging.Formatter( fmt=( "%(levelname)s:%(pathname)s:%(lineno)d:%(asctime)s:" "%(message)s\n") ) ) dev_mailer = notify.BulkHandler() dev_mailer.addTarget(mail_target) dev_mailer.addFormatter(mail_formatter) dev_mailer.setLevel(criticality_level) this_logger.addHandler(dev_mailer) if email_to: mail_target = notify.checkingSMTPHandler( 'localhost', email_from, EmailAddressList(email_to).as_list, email_subject_line ) mail_formatter = notify.BulkFormatter( linefmt = logging.Formatter( fmt=("%(levelname)s:%(message)s\n") ) ) mailer = notify.MailerHandler() mailer.addTarget(mail_target) mailer.addFormatter(mail_formatter) mailer.addFilter(notify.FilterMax(criticality_level)) logging.getLogger(notify.getLogName()).addHandler(mailer) #-----------------------------------------------------------------------
PypiClean