id (stringlengths 1-265) | text (stringlengths 6-5.19M) | dataset_id (stringclasses, 7 values) |
---|---|---|
/AMAS_sb-1.0.1-py3-none-any.whl/AMAS/iterator.py | import copy
import itertools
import numpy as np
import os
from AMAS import constants as cn
from AMAS import tools
# from AMAS import species_annotation as sa
# from AMAS import reaction_annotation as ra
# Keys when evaluating match results.
NEW_SCORE = 'new_score'
OLD_SCORE = 'old_score'
INCREASED = 'is_increased'
# Max limit for iteration
MAX_ITER = 3
class Iterator(object):
def __init__(self,
cur_spec_formula,
reaction_cl,
reactions_to_update=None):
"""
Ideally, arguments should be directly from
the relevant species.formula and reactions.candidates.
Returns may be dictionaries
for species.candidates and reactions.candidates;
(not 100% confirmed yet)
Parameters
----------
cur_spec_formula: dict
{species_id: [predicted-formulas]}
Current (most recent) annotation of species
reaction_cl: AMAS.reaction_annotation.ReactionAnnotation
reaction_annotation class instance with
loaded SBML model information (reaction_components, etc.)
reactions_to_update: list-str
List of reactions to update; if None, use all reactions
from self.reactions.candidates
"""
self.orig_spec_formula = cur_spec_formula
# Storing reaction candidates separately,
# as it may be different than self.reactions.candidates
self.reactions = reaction_cl
if reactions_to_update:
self.r2upd = reactions_to_update
else:
self.r2upd = list(reaction_cl.candidates.keys())
def getDictOfRheaComponentFormula(self, inp_rhea):
"""
Get a dictionary {chebi_id: formula}
from a given rhea term.
Rhea term -> ChEBI IDs -> Formulas
Parameters
----------
str: inp_rhea
A Rhea identifier
Returns
-------
: dict
{chebi_id: formula-str}
"""
chebis = cn.REF_RHEA2CHEBI[inp_rhea]
return {val:cn.REF_CHEBI2FORMULA[val] for val in chebis \
if val in cn.REF_CHEBI2FORMULA.keys()}
def getDictMatchByItem(self,
chebi2ref_formula,
spec2pred_formula):
"""
Get a match between the two dictionaries
when there is exactly one
unmatched item on each side.
If all items are matched 1-1
(i.e., one species - one ChEBI),
return the fully matched dictionary
(i.e., improved precision).
Otherwise, return None
(i.e., nothing to update).
Parameters
----------
chebi2ref_formula: dict
{chebi_term: a_species_formula(string)}
spec2pred_formula: dict
{species_id: [predicted_formulas]}
Returns
-------
dict/None
{species_id: [chebi_term]}
"""
match_dict = {one_k:[spec_id for spec_id in spec2pred_formula.keys() \
if chebi2ref_formula[one_k] in spec2pred_formula[spec_id]
] \
for one_k in chebi2ref_formula.keys()}
unmatched_species = [val for val in spec2pred_formula.keys() \
if val not in list(itertools.chain(*match_dict.values()))]
unmatched_chebi = [val for val in match_dict.keys() if not match_dict[val]]
if len(unmatched_species) == 1 and len(unmatched_chebi) == 1:
return {unmatched_species[0]: unmatched_chebi}
# reverse match_dict into the proper return format.
elif all([len(val[1])==1 for val in list(match_dict.items())]):
return {match_dict[k][0]: [k] for k in match_dict.keys()}
else:
return None
def getDictsToUpdate(self, reaction_id):
"""
Using self.getDictMatchByItem(),
get dictionaries to update
Parameters
----------
str: reaction_id
Returns
-------
match_res: dict
{species_id: [ChEBI terms]}
match_res_formula: dict
{species_id: [formula-str]}
"""
one_rhea = self.reactions.candidates[reaction_id][0][0]
# match_res will look like {species_id: [CHEBI term]}
# filter to have only keys and items of one reaction
filt_spec_formula = {k:self.orig_spec_formula[k] \
for k in self.reactions.reaction_components[reaction_id]}
upd_spec_chebi = self.getDictMatchByItem(chebi2ref_formula=self.getDictOfRheaComponentFormula(one_rhea),
spec2pred_formula=filt_spec_formula)
if upd_spec_chebi:
upd_spec_formula = {k:[cn.REF_CHEBI2FORMULA[chebi] \
for chebi in upd_spec_chebi[k]] for k in upd_spec_chebi.keys()}
else:
upd_spec_formula = None
return upd_spec_chebi, upd_spec_formula
def getUpdatedMatchScore(self, cur_spec_formulas, inp_spec2formula_dict):
"""
Check whether the update improves the reaction measures;
the new value (mean of the maximum match score per reaction)
is compared to the old one and the comparison is returned as a dict.
Parameters
----------
cur_spec_formulas: dict
{'species_id': [formula-str]}
Dictionary to be updated
inp_spec2formula_dict: dict
{'species_id': [formula-str]}
Dictionary to update
Returns
-------
: dict
{'new_score': float, 'old_score': float, 'is_increased': bool}
"""
cur_spec_formulas.update(inp_spec2formula_dict)
new_pred_res = self.reactions.getRScores(spec_dict=cur_spec_formulas,
reacs=list(self.r2upd),
mssc='top',
cutoff=0.0)
old_pred_res = self.reactions.getRScores(spec_dict=self.orig_spec_formula,
reacs=list(self.r2upd),
mssc='top',
cutoff=0.0)
# since candidates are already sorted,
# just check the match score (index '1') of the very first candidate tuple (index '0')
new_pred_val = np.mean([new_pred_res[k][0][1] \
for k in new_pred_res.keys()])
old_pred_val = np.mean([old_pred_res[k][0][1] \
for k in old_pred_res.keys()])
return {NEW_SCORE: new_pred_val,
OLD_SCORE: old_pred_val,
INCREASED: new_pred_val>old_pred_val}
def match(self):
"""
Use self.runOneMatchCycle()
and determine the final products to return.
Will be used by the recommender or the user.
"""
all_upd_spec_chebi = dict()
for _ in range(MAX_ITER):
upd_spec_chebi = self.runOneMatchCycle()
if upd_spec_chebi:
all_upd_spec_chebi.update(upd_spec_chebi)
# Update the formula attribute for the next iteration
for one_k in upd_spec_chebi.keys():
self.orig_spec_formula[one_k] = [cn.REF_CHEBI2FORMULA[val] \
for val in upd_spec_chebi[one_k] \
if val in cn.REF_CHEBI2FORMULA.keys()]
else:
break
# Maybe run reaction once, and return final results :)
return all_upd_spec_chebi
def runOneMatchCycle(self):
"""
Using the methods & information,
determine species to update.
(Reactions will be updated in the following steps.)
This method will be directly used by
the Recommender, or even the user.
Returns
-------
combine_upd_spec2chebi: dict
{species_id: [ChEBI terms]}
"""
combine_upd_spec2chebi = dict()
# Use reactions existing in self.r2upd
for one_reaction in self.r2upd:
one_rhea_tup = self.reactions.candidates[one_reaction]
one_rhea = one_rhea_tup[0][0]
pred_spec_formulas = self.orig_spec_formula
one_rhea2formula = self.getDictOfRheaComponentFormula(inp_rhea=one_rhea)
upd_spec2chebi, upd_spec2formula = self.getDictsToUpdate(reaction_id=one_reaction)
# Meaning, when examining match scores we only consider
# individual updates; not cumulative updates (so we don't use combine_upd_spec2chebi below)
if upd_spec2formula:
upd_val = self.getUpdatedMatchScore(cur_spec_formulas = copy.deepcopy(self.orig_spec_formula),
inp_spec2formula_dict = upd_spec2formula)
if upd_val[INCREASED]:
# update combine_upd_spec2chebi by combining the elements.
for k in upd_spec2chebi.keys():
if k in combine_upd_spec2chebi.keys():
combine_upd_spec2chebi[k] = list(set(combine_upd_spec2chebi[k] + upd_spec2chebi[k]))
else:
combine_upd_spec2chebi[k] = upd_spec2chebi[k]
return combine_upd_spec2chebi | PypiClean |
/Confopy-0.4.11.tar.gz/Confopy-0.4.11/README.md | Confopy
=======
Asserts the linguistic and structural quality of scientific texts.
Confopy is a command-line tool that accepts one or multiple PDF documents and prints textual reports.
Currently it only works for German papers.
Name origin: Confopy := Conform + Python
Installation
============
Installation using pypi (preferred)
-----------------------------------
sudo pip install -U Confopy
Launch Confopy with
confopy --help
confopy -r document your_paper.pdf
Manual installation
-------------------
Dependencies:
sudo apt-get install python-pdfminer
sudo pip install -U lxml
sudo pip install numpy==1.6.2
sudo pip install pyyaml nltk==3.0.0
sudo pip install pyenchant==1.6.5
sudo pip install pattern==2.6
Launch Confopy with
python confopy/ --help
python confopy/ -r document your_paper.pdf
Usage
=====
$ confopy -h
usage: confopy [-h] [-l LANGUAGE] [-lx] [-ml] [-o OUTFILE] [-r REPORT] [-rl]
[-ul] [-vl] [-x]
[file [file ...]]
Language and structure checker for scientific documents.
positional arguments:
file Document file to analyze (PDF).
optional arguments:
-h, --help show this help message and exit
-l LANGUAGE, --language LANGUAGE
Language to use for PDF extraction and document
analysis. Default: de
-lx, --latex Tell the specified report to format output as LaTeX
(if supported by the report).
-ml, --metriclist Lists all available metrics by language and exits.
-o OUTFILE, --outfile OUTFILE
File to write the output too. Default: terminal
(stdout).
-r REPORT, --report REPORT
Analyses the given document according to the specified
report.
-rl, --reportlist Lists all available reports by language and exits.
-ul, --rulelist Lists all rules and exits.
-vl, --validate Validates a given XML against the XSD for the Confopy
data model.
-x, --xml Converts the PDF file(s) to Confopy XML (structure
orientated).
Getting a corpus
================
Confopy needs a corpus (collection of language data) to run.
For German (TIGER treebank):
Automated download:
1. Go to
<your python package directory>/confopy/localization/de/corpus\_de/
2. Execute the script
tiger_dl_patch.py
within that folder
Manual download:
1. Go to:
http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/license/htmlicense.html
2. Accept the license and download TIGER-XML Release 2.2:
http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/download/tigercorpus-2.2.xml.tar.gz
3. Unpack the archive into confopy/localization/de/corpus\_de/
4. Run the patch tiger\_release\_aug07.corrected.16012013\_patch.py in the same folder
5. Verify that the generated file is named exactly like in confopy/config.py
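The manual steps above as a rough shell sketch (URLs and file names are taken from the steps; using `wget`/`tar` is an assumption, and the license still has to be accepted on the website first):

```
cd <your python package directory>/confopy/localization/de/corpus_de/
wget http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/download/tigercorpus-2.2.xml.tar.gz
tar xzf tigercorpus-2.2.xml.tar.gz
python tiger_release_aug07.corrected.16012013_patch.py
```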
Python 3
========
* The package python-pdfminer only works with python 2.4 or newer, but not with python 3
Known Issues and Workarounds
===============================
enchant.errors.DictNotFoundError: Dictionary for language 'de_DE' could not be found
------------------------------------------------------------------------------------
Install the German aspell package. E.g. on Ubuntu 16.04:
```
sudo apt install aspell-de
```
Unicode errors
--------------
* Configure terminal to use unicode!
* For Python devs:
http://docs.python.org/2/howto/unicode.html#the-unicode-type
* Convert the TIGER Treebank file
"tiger_release_aug07.corrected.16012013.xml"
to utf-8 encoding before using Confopy!
| PypiClean |
/KaChLog-1.2.1-py3-none-any.whl/kachlog/_changelog.py | import datetime
import re
from typing import Dict, Iterable, List, Optional, Union
from ._versioning import (
actual_version,
bump,
from_semantic,
semantic_order,
to_semantic,
to_sorted_semantic,
)
from .exceptions import InvalidSemanticVersion
from .templates import BASE, TYPES_OF_CHANGE
def is_release(line: str) -> bool:
return line.startswith("## ")
def add_release(changes: Dict[str, dict], line: str) -> dict:
release_line = line[3:].lower().strip(" ")
# A release is separated by a space between version and release date
# Release pattern should match lines like: "[0.0.1] - 2020-12-31" or [Unreleased]
version, release_date = (
release_line.split(" ", maxsplit=1)
if " " in release_line
else (release_line, None)
)
version = unlink(version)
metadata = {"version": version, "release_date": extract_date(release_date)}
try:
metadata["semantic_version"] = to_semantic(version)
except InvalidSemanticVersion:
pass
return changes.setdefault(version, {"metadata": metadata})
def unlink(value: str) -> str:
return value.lstrip("[").rstrip("]")
def extract_date(date: str) -> str:
if not date:
return date
return date.lstrip(" -(").rstrip(" )")
def is_category(line: str) -> bool:
return line.startswith("### ")
def add_category(release_dict: dict, line: str) -> List[str]:
category = line[4:].lower().strip(" ")
return release_dict.setdefault(category, [])
# Link pattern should match lines like: "[1.2.3]: https://github.com/user/project/releases/tag/v0.0.1"
link_pattern = re.compile(r"^\[(.*)\]: (.*)$")
def is_link(line: str) -> bool:
return link_pattern.fullmatch(line) is not None
def add_information(category: List[str], line: str):
category.append(line.lstrip(" *-").rstrip(" -"))
def to_raw_dict(changelog_path: str) -> Dict[str, dict]:
changes = {}
# As URLs can be defined before actual usage, maintain a separate dict
urls = {}
with open(changelog_path, "r", encoding="utf-8") as change_log:
current_release = {}
for line in change_log:
clean_line = line.strip(" \n")
if is_release(clean_line):
current_release = add_release(changes, clean_line)
elif is_link(clean_line):
link_match = link_pattern.fullmatch(clean_line)
urls[link_match.group(1).lower()] = link_match.group(2)
elif clean_line:
current_release["raw"] = current_release.get("raw", "") + line
# Add url for each version (create version if not existing)
for version, url in urls.items():
changes.setdefault(version, {"metadata": {"version": version}})["metadata"][
"url"
] = url
unreleased_version = None
for version, current_release in changes.items():
metadata = current_release["metadata"]
# If there is an empty release date, it identifies the unreleased section
if ("release_date" in metadata) and not metadata["release_date"]:
unreleased_version = version
changes.pop(unreleased_version, None)
return changes
def to_dict(
changelog_path: Union[str, Iterable[str]], *, show_unreleased: bool = False
) -> Dict[str, dict]:
"""
Convert changelog markdown file following keep a changelog format into python dict.
:param changelog_path: Path to the changelog file, or context manager providing iteration on lines.
:param show_unreleased: Add unreleased section (if any) to the resulting dictionary.
:return python dict containing version as key and related changes as value.
"""
# Allow for changelog as a file path or as a context manager providing content
try:
with open(changelog_path, "r", encoding="utf-8") as change_log:
return _to_dict(change_log, show_unreleased)
except TypeError:
return _to_dict(changelog_path, show_unreleased)
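# Usage sketch (the file name is hypothetical):
#   changes = to_dict("CHANGELOG.md", show_unreleased=True)
#   new_version = release(changes)   # release() is defined further below
#   markdown = from_dict(changes)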
def _to_dict(change_log: Iterable[str], show_unreleased: bool) -> Dict[str, dict]:
changes = {}
# As URLs can be defined before actual usage, maintain a separate dict
urls = {}
current_release = {}
category = []
for line in change_log:
line = line.strip(" \n")
if is_release(line):
current_release = add_release(changes, line)
category = current_release.setdefault("uncategorized", [])
elif is_category(line):
category = add_category(current_release, line)
elif is_link(line):
link_match = link_pattern.fullmatch(line)
urls[link_match.group(1).lower()] = link_match.group(2)
elif line:
add_information(category, line)
# Add url for each version (create version if not existing)
for version, url in urls.items():
changes.setdefault(version, {"metadata": {"version": version}})["metadata"][
"url"
] = url
# Avoid empty uncategorized
unreleased_version = None
for version, current_release in changes.items():
metadata = current_release["metadata"]
if not current_release.get("uncategorized"):
current_release.pop("uncategorized", None)
# If there is an empty release date, it identifies the unreleased section
if ("release_date" in metadata) and not metadata["release_date"]:
unreleased_version = version
if not show_unreleased:
changes.pop(unreleased_version, None)
elif "unreleased" not in changes:
changes["unreleased"] = _unreleased()
return changes
def from_release(release_dict: dict, version: str) -> str:
content = ""
metadata = release_dict["metadata"]
content += f"\n## [{metadata['version'].capitalize()}]"
if metadata.get("release_date"):
content += f" - {metadata['release_date']}"
uncategorized = release_dict.get("uncategorized", [])
for category_content in uncategorized:
content += f"\n* {category_content}"
if uncategorized:
content += "\n"
version_changes = ""
for category_name, category_content in release_dict.items():
if category_name in ["metadata", "uncategorized"]:
continue
if category_content or version == "unreleased":
version_changes += f"\n### {category_name.capitalize()}"
for categorized in category_content:
version_changes += f"\n- {categorized}"
version_changes += "\n"
if version_changes:
content += "\n"
content += version_changes
if not version_changes:
content += "\n"
return content
def from_dict(changelog: Dict[str, dict]) -> str:
content = BASE
versions = [version for version, _ in to_sorted_semantic(changelog.keys())]
versions.append("unreleased")  # 'unreleased' should be there for this
for version in reversed(versions):
content += from_release(changelog[version], version)
content += "\n"
for version in reversed(versions):
current_release = changelog[version]
metadata = current_release["metadata"]
if not metadata.get("url"):
continue
content += f"[{metadata['version'].capitalize()}]: {metadata['url']}\n"
return content
def _unreleased(sections: bool = False) -> Dict:
unreleased = {"metadata": {"version": "unreleased", "release_date": None}}
if sections:
unreleased.update({change_type: [] for change_type in TYPES_OF_CHANGE})
return unreleased
def release(
changelog: Dict[str, dict], new_version: str = None, sections: bool = False
) -> Optional[str]:
"""
Release a new version based on changelog unreleased content.
:param changelog: Changelog dict (as returned by to_dict with show_unreleased=True).
:param new_version: The new version to use instead of trying to guess one.
:return: The new version, None if there was no change to release.
"""
new_release = changelog["unreleased"].copy()
metadata = {}
current_version, current_semantic_version = actual_version(changelog)
if not new_version:
metadata["semantic_version"] = bump(new_release, current_semantic_version)
new_version = from_semantic(metadata["semantic_version"])
else:
metadata["semantic_version"] = to_semantic(new_version)
compare = semantic_order(
(new_version, metadata["semantic_version"]),
(current_version, current_semantic_version),
)
if compare <= 0:  # input version not greater than the current (newest) version
raise InvalidSemanticVersion(new_version)
if new_version:
metadata["version"] = new_version
metadata["release_date"] = datetime.date.today().isoformat()
new_release.update({"metadata": metadata})
changelog.update({"unreleased": _unreleased(sections), new_version: new_release})
return new_version | PypiClean |
/FlashXTest-3.6.tar.gz/FlashXTest-3.6/README.rst | Flash-X-Test
============
Installation
============
- Use ``./setup develop`` for development mode
- Or use ``./setup install`` to install the package on your system
- Or use ``pip install git+ssh://[email protected]/Flash-X/Flash-X-Test.git@main``
- You are good to go. Now you can simply type ``flashxtest`` on your
command line and it will be available, or you can import the Python API using
``import FlashXTest.api as flashxtest``
Usage
=====
``flashxtest --help`` and ``flashxtest [command] --help`` provide information on usage
Examples
========
See ``FlashXTest/example/example.suite`` for an example test suite
| PypiClean |
/MOAI-2.0.0.tar.gz/MOAI-2.0.0/moai/wsgi.py | import os
from webob import Request, Response
from moai.server import Server, FeedConfig
from moai.database import get_database
class WSGIRequest(object):
"""This is a request object that can be used in a WSGI environment.
It implements the :ref:`IServerRequest` interface.
"""
def __init__(self, request):
self._req = request
def url(self):
return self._req.url
def redirect(self, url):
"""Redirect to this url
"""
response = Response()
response.status = 302
response.location = url
return response
def send_file(self, path, mimetype):
"""Send the file located at 'path' back to the user
"""
response = Response(content_type=mimetype,
conditional_response=True)
response.last_modified = os.path.getmtime(path)
response.app_iter = FileIterable(path)
with open(path) as f:
response.body = f.read()
response.content_length = os.path.getsize(path)
# do not accept ranges, since this does not work reliable
# with acrobat IE plugin
response.headers['Accept-Ranges'] = 'none'
return response
def query_dict(self):
"""Return a dictionary with QueryString values of the
request
"""
args = dict(self._req.GET)
args.update(dict(self._req.POST))
return args
def write(self, data, mimetype):
"""Write data back to the client
"""
response = Response()
response.content_type = mimetype
response.body = data
return response
def send_status(self, code, msg='', mimetype='text/plain'):
response = Response()
response.content_type = mimetype
response.status = int(code.split()[0])
response.body = msg
return response
class MOAIWSGIApp(object):
# the wsgi app, calls the IServer with the IServerRequest
def __init__(self, server):
self.server = server
def __call__(self, environ, start_response):
request = Request(environ)
response = self.server.handle_request(WSGIRequest(request))
return response(environ, start_response)
def app_factory(global_config,
name,
url,
admin_email,
database,
formats,
**kwargs):
# WSGI APP Factory
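# A PasteDeploy-style config sketch for this factory (keys mirror the
# signature above; the section name, egg spec and all values are
# illustrative assumptions, not taken from MOAI's docs):
#
#   [app:moai]
#   use = egg:MOAI
#   name = My OAI Feed
#   url = http://localhost:8080/oai
#   admin_email = admin@example.org
#   database = sqlite:///moai.db
#   formats = oai_dc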
formats = formats.split()
admin_email = admin_email.split()
sets_deleted = kwargs.get('deleted_sets') or []
if sets_deleted:
sets_deleted = sets_deleted.split()
sets_disallowed = kwargs.get('disallowed_sets', '') or []
if sets_disallowed:
sets_disallowed = sets_disallowed.split()
sets_allowed = kwargs.get('allowed_sets', '') or []
if sets_allowed:
sets_allowed = sets_allowed.split()
sets_needed = kwargs.get('needed_sets', '') or []
if sets_needed:
sets_needed = sets_needed.split()
database = get_database(database, kwargs)
feedconfig = FeedConfig(name,
url,
admin_emails=admin_email,
metadata_prefixes=formats,
sets_deleted=sets_deleted,
sets_disallowed=sets_disallowed,
sets_allowed=sets_allowed,
sets_needed=sets_needed,
extra_args=kwargs)
server = Server(url, database, feedconfig)
return MOAIWSGIApp(server)
class FileIterable(object):
# Helper objects to stream asset files
def __init__(self, filename, start=None, stop=None):
self.filename = filename
self.start = start
self.stop = stop
def __iter__(self):
return FileIterator(self.filename, self.start, self.stop)
def app_iter_range(self, start, stop):
return self.__class__(self.filename, start, stop)
class FileIterator(object):
chunk_size = 4096
def __init__(self, filename, start, stop):
self.filename = filename
self.fileobj = open(self.filename, 'rb')
if start:
self.fileobj.seek(start)
if stop is not None:
self.length = stop - start
else:
self.length = None
def __iter__(self):
return self
def next(self):
if self.length is not None and self.length <= 0:
raise StopIteration
chunk = self.fileobj.read(self.chunk_size)
if not chunk:
raise StopIteration
if self.length is not None:
self.length -= len(chunk)
if self.length < 0:
# Chop off the extra:
chunk = chunk[:self.length]
return chunk | PypiClean |
/MFAProblem-1.0.1b0.tar.gz/MFAProblem-1.0.1b0/mfa_problem/io_excel.py | import os
import tempfile
import xmltodict
import shutil
import pandas as pd
import numpy as np
from zipfile import ZipFile
from openpyxl import load_workbook
from collections import OrderedDict
from openpyxl.formatting.rule import CellIsRule
from openpyxl.styles import PatternFill, Font
try:
from . import io_bdd
except Exception:
import io_bdd
try:
from . import su_trace
except Exception:
import su_trace
def load_mfa_problem_from_excel(
input_file: str,
create_empty_ter=False # if True only products and sectors worksheets will be extracted
):
'''Main convertor routine. Call dedicated routine depending on input type
- input_file : string with input file name (with extension and path)
'''
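# Usage sketch (the workbook name is illustrative, borrowed from the
# "pommes-poires.xlsx" example referenced in the comments below):
#   mfa_problem_input = load_mfa_problem_from_excel('pommes-poires.xlsx')
#   products = mfa_problem_input['dim_products']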
input_categories = [
'param', 'dim_products', 'dim_sectors', 'data',
'min_max', 'constraints', 'ter_base'
]
df_ifile = {}
tab_list = []
df_ifile = pd.read_excel(input_file, None)
real_tablist = [*df_ifile]
consistant_tab_list = ['prod', 'sect']
if not create_empty_ter:
consistant_tab_list += [
'param', 'flux', 'data', 'minmax',
'constr', 'proxy', 'pflow', 'psect'
]
# keeping only consistent sheets
for real_tab in real_tablist:
consistent_sheet = consistantSheetName(real_tab)
if consistent_sheet in consistant_tab_list:
# real_tab is consistent
tab_list.append(real_tab)
if (consistent_sheet in ['flux']):
# dtype has to be str to keep geographic ids (of Communes for exemple) in
# string format => reload flux worksheet with the correct dtype
del df_ifile[real_tab]
df_ifile[real_tab] = pd.read_excel(input_file, sheet_name=real_tab, dtype=str)
su_trace.logger.debug('Names of excel sheet :' + str(tab_list))
mfa_problem_input = xl_convert_tablist(df_ifile, tab_list)
# add eventual missing entries
for input_category in input_categories:
if input_category not in mfa_problem_input.keys():
mfa_problem_input[input_category] = []
return mfa_problem_input
def consistantSheetName(
prop_sheet: str
):
'''
Test if prop_sheet is consistent with the allowed sheet list.
- Result is an empty string if the tested sheet is not consistent.
- Result is the dictionary key corresponding to the allowed list found.
Note 1 : if the prop_sheet input is empty ('') the result is a list of
allowed sheet names as a string
Note 2 : a particular case is taken into account for proxy input files which
usually have 3 proxy sheets (and one of them with the 'sector' keyword in its name)
'''
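# e.g. consistantSheetName('Produits') returns 'prod';
# consistantSheetName('') returns the allowed names as a comma-separated string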
dictofsheetsnames = {
'param': ['param'], 'prod': ['produit', 'product'],
'sect': ['secteur', 'sector'], 'geo': ['geo', 'liste'],
'flux': ['ter', 'flux'],
'data': ['data', 'donn'], 'minmax': ['min max', 'min_max'],
'constr': ['contrainte', 'constraint'],
'proxy': ['proxi', 'proxy'], 'pflow': ['flow'], 'psect': []
}
prop_sheet = prop_sheet.lower()
list_allowed = ''
if prop_sheet != '':
particular_case = False
for allow_sheet in dictofsheetsnames['proxy']:
if allow_sheet in prop_sheet:
particular_case = True
if particular_case:
# We are in the particular case (cf Note 2)
for allow_sheet in dictofsheetsnames['pflow']:
if allow_sheet in prop_sheet:
return 'pflow'
for allow_sheet in dictofsheetsnames['sect']:
if allow_sheet in prop_sheet:
return 'psect'
return 'proxy'
else:
for dict_key in dictofsheetsnames.keys():
for allow_sheet in dictofsheetsnames[dict_key]:
if allow_sheet in prop_sheet:
return dict_key
else:
for dict_key in dictofsheetsnames.keys():
if len(dictofsheetsnames[dict_key]) != 0:
list_allowed += ', '.join(dictofsheetsnames[dict_key])
return list_allowed
def xl_import_tab(
df_fi: dict,
stab: str,
def_val: list,
js_tab: str,
mfa_problem_input: dict
):
"""Import informations from workbook tab called stab if it exists
- df_fi : dataframe with all sheets of the input file
- stab : name of the workbook sheet to work on
- def_val : dictionnary of default values (default columns values of excel sheet)
- js_tab : name of the main JSon dictionnary key for this entry
- mfa_problem_input : dictionnary with informations to convert in JSon format
"""
my_json = []
try:
df_prod = df_fi[stab]
# Check if we need to add empty columns
if len(list(df_prod)) < len(def_val):
starti = len(list(df_prod))
for i in range(starti, len(def_val)):
cname = "Col_" + str(i)
df_prod[cname] = def_val[i]
# Fill dataframe nan with default value
for i, col in enumerate(list(df_prod)): # iterable on columns names
vdef = ''
if i < len(def_val):
vdef = def_val[i]
if vdef is not None:
df_prod[col] = df_prod[col].fillna(value=vdef)
if type(vdef) is str:
df_prod[col] = df_prod[col].astype(str)
if type(vdef) is int:
df_prod[col] = df_prod[col].astype(int)
else:
df_prod[col] = df_prod[col].replace({np.nan: None})
# Extract values (nparray) from dataframe and convert them in a list format
my_json = df_prod.values.tolist()
except Exception:
su_trace.logger.error(f'Exception in xl_import_tab for {js_tab} entry')
mfa_problem_input[js_tab] = my_json
def input_to_json(
input_type,
input_file,
sess_act,
mod_name
):
'''Main convertor routine. Call dedicated routine depending on input type
- input_type : type of the input (0 : xls/xlsx/csv, 2 : database)
- input_file : string with input file name (with extension and path)
- sess_act : active database session (used when reading from the database)
- mod_name : model name used when reading from the database
'''
# Names of tab sheets as they should be after naming normalisation
jstab_list = [
'param', 'dim_products', 'dim_sectors', 'data', 'min_max',
'constraints', 'ter_base'
]
mfa_problem_input = {}
if input_type == 0: # excel input
# import input_file only once
df_ifile = pd.read_excel(input_file, None)
myxl_tablist = [*df_ifile] # myxl_tablist = list(df_ifile.keys()) is slower
su_trace.logger.debug('Names of excel sheet :' + str(myxl_tablist))
mfa_problem_input = xl_convert_tablist(df_ifile, myxl_tablist)
# add eventual missing entries
for tab in jstab_list:
if tab not in mfa_problem_input.keys():
mfa_problem_input[tab] = []
elif input_type == 2: # input from data base
mfa_problem_input['param'] = io_bdd.read_inputs(io_bdd.Param, sess_act, mod_name, '', '')
mfa_problem_input['dim_products'] = io_bdd.read_inputs(io_bdd.Product, sess_act, mod_name, '', '')
mfa_problem_input['dim_sectors'] = io_bdd.read_inputs(io_bdd.Sector, sess_act, mod_name, '', '')
mfa_problem_input['ter_base'] = io_bdd.read_inputs(io_bdd.Flux, sess_act, mod_name, '', '')
mfa_problem_input['data'] = io_bdd.read_inputs(io_bdd.Data, sess_act, mod_name, '', '')
mfa_problem_input['min_max'] = io_bdd.read_inputs(io_bdd.MinMax, sess_act, mod_name, '', '')
mfa_problem_input['constraints'] = io_bdd.read_inputs(io_bdd.Constraint, sess_act, mod_name, '', '')
else:
pass
# add eventual missing entries
for tab in jstab_list:
if tab not in mfa_problem_input.keys():
mfa_problem_input[tab] = []
return mfa_problem_input
def xl_get_sheet_details(
file_path,
only_sheets=True
):
'''
Found at : https://stackoverflow.com/questions/17977540/pandas-looking-up-the-list-of-sheets-in-an-excel-file
Fastest way to get information from an excel file without the need to open it
Benchmarking: (On a 6mb xlsx file with 4 sheets)
Pandas, xlrd: 12 seconds
openpyxl: 24 seconds
Proposed method: 0.4 seconds
Notes (modifications I made):
- use tempfile.mkdtemp instead of settings.MEDIA_ROOT
- routine adapted to extract only sheets names (when entry only_sheets=True)
Requirements :
- must install xmltodict and add 'import xmltodict'
- must add 'import tempfile'
- must add 'import shutil'
- must add 'from zipfile import ZipFile'
'''
sheets = []
# Make a temporary directory to work in
directory_to_extract_to = tempfile.mkdtemp()
# Extract the xlsx file as it is just a zip file
zip_ref = ZipFile(file_path, 'r')
zip_ref.extractall(directory_to_extract_to)
zip_ref.close()
# Open the workbook.xml which is very light and only has meta data, get sheets from it
path_to_workbook = os.path.join(directory_to_extract_to, 'xl', 'workbook.xml')
with open(path_to_workbook, 'r', encoding='utf8') as f:
xml = f.read()
dictionary = xmltodict.parse(xml, encoding='utf-8')
for sheet in dictionary['workbook']['sheets']['sheet']:
sheet_details = {
'id': sheet['@sheetId'],
'name': sheet['@name']
}
if only_sheets:
sheets.append(sheet['@name'])
else:
sheets.append(sheet_details)
# Delete the extracted files directory
shutil.rmtree(directory_to_extract_to)
return sheets
def xl_import_param(
df_fi: dict,
stab: str,
mfa_problem_input: dict
):
"""Import information from workbook tab called "param" if it exists
- df_fi : dataframe with all sheets of the input file
- stab : name of the workbook sheet to work on
- mfa_problem_input : dictionary with information to convert into JSon format
"""
my_json = []
try:
df_prod = df_fi[stab]
# Fill dataframe nan with default value
df_prod = df_prod.fillna(value='').replace({np.nan: None})
# Extract values (nparray) from dataframe and convert them in a list format
my_json = df_prod.values.tolist()
except Exception:
su_trace.logger.error('Exception in xl_import_param')
# mfa_problem_input['param'] = {'max': le_max, 'tol': tol}
mfa_problem_input['param'] = {}
for row in my_json: # iterable on columns names
mfa_problem_input['param'][row[0]] = row[1]
def xl_import_terbase(
df_fi: dict,
stab: str,
mfa_problem_input: dict
):
"""Import informations from workbook tab called "ter_base" if it exists
- df_fi : dataframe with all sheets of the input file
- stab : name of the workbook tabulation to work on
- mfa_problem_input : dictionnary with informations to convert in JSon format
"""
my_json = {}
try:
li_tmp = [li[1] for li in mfa_problem_input['dim_products']] # list of products (nb of rows)
# list of UNIQUE products ordered by apparition order
li_prod = list(OrderedDict.fromkeys(li_tmp))
nb_prod = len(li_prod) # number of UNIQUE products
# list of UNIQUE sectors ordered by apparition order
li_tmp = [li[1] for li in mfa_problem_input['dim_sectors']]
li_sect = list(OrderedDict.fromkeys(li_tmp))
# loading dataframe from excel file
df_prod = df_fi[stab]
li_col = df_prod.iloc[0, 2:].values.tolist() # Get columns list
li_row = df_prod.iloc[1:, 1].values.tolist() # Get row indexes names
df_prod = df_prod.iloc[1:, 2:] # Values
df_prod.index = li_row
df_prod.columns = li_col
df_prod = df_prod.loc[~df_prod.index.isnull()] # remove all lines with empty index
# We need to check if the nb_prod row still contains column names
# (because in some excel files this line does not have an empty index)
if df_prod.iloc[nb_prod, 0] in li_sect:
df_prod = df_prod.drop(df_prod.index[nb_prod])
li_xlsect = list(df_prod) # list of sectors (columns) in excel file
# Check if the column order is consistent (well ordered)
if li_xlsect != li_sect:
# We need to rearrange columns
try:
df_prod = pd_sorted_col(df_prod, li_sect)
except Exception:
dupes = [x for n, x in enumerate(df_prod.keys()) if x in df_prod.keys()[:n]]
if len(dupes) > 0:
su_trace.logger.error(f'ERROR: Duplicate sectors in ter1. {dupes}')
su_trace.logger.info('Sectors defined in dim that are not in ter1:')
for k in li_sect:
if k not in df_prod.keys():
su_trace.logger.error(f'ERROR {k}')
su_trace.logger.info('Sectors in ter1 that are not defined in dim:')
for k in df_prod.keys():
if k not in li_sect:
su_trace.logger.error(f'ERROR {k}')
df_prod = pd_sorted_col(df_prod, li_sect)
# Extract "supply" part of the dataframe
df_tmp = df_prod[0:nb_prod]
li_xlprod = list(df_tmp.index.values)
if li_xlprod != li_prod:
# We need to rearrange rows
dft = df_tmp.transpose()
try:
dft = pd_sorted_col(dft, li_prod)
except Exception:
dupes = [x for n, x in enumerate(dft.keys()) if x in dft.keys()[:n]]
if len(dupes) > 0:
su_trace.logger.error(f'ERROR: Duplicate products in ter1. {dupes}')
su_trace.logger.error('Products defined in dim that are not in ter1:')
for k in li_prod:
if k not in dft.keys():
su_trace.logger.error(f'ERROR: {k}')
su_trace.logger.error('Products in ter1 that are not defined in dim:')
for k in dft.keys():
if k not in li_prod:
su_trace.logger.error(f'ERROR: {k}')
dft = pd_sorted_col(dft, li_prod)
df_tmp = dft.transpose()
# Extract values (nparray) from dataframe and convert them in a list format
li_tmp = df_tmp.values.tolist()
# Replace Nan by None
li_clean = [[None if val != val else int(val) for val in lign] for lign in li_tmp]
my_json['supply'] = li_clean
# Extract "use" part of the dataframe
ini = nb_prod
df_tmp = df_prod[ini:ini+nb_prod]
li_xlprod = list(df_tmp.index.values)
if li_xlprod != li_prod:
dft = df_tmp.transpose()
dft = pd_sorted_col(dft, li_prod)
df_tmp = dft.transpose()
li_tmp = df_tmp.values.tolist()
li_clean = [[None if val != val else int(val) for val in lign] for lign in li_tmp]
my_json['use'] = li_clean
except Exception as expt:
su_trace.logger.error('Exception in xl_import_terbase: ' + str(expt))
mfa_problem_input['ter_base'] = my_json
def pd_sorted_col(
dft,
lico
):
"""Sort columns order of a dataframe in function of a column list"""
li_df = list(dft)
if li_df != lico:
dftm = pd.DataFrame(columns=lico)
for col in lico:
dftm[col] = dft[col]
return dftm
def xl_convert_tablist(
df_file: str,
tab_list: list
):
""" Convert each tab of a workbook in mfa_problem_input dictionnary entry
- df_file : dataframe with all sheets of the input file
- tab_list : input file worksheet list
"""
mfa_problem_input = {}
for tab in tab_list:
consistant_tab = consistantSheetName(tab)
if consistant_tab == 'param':
xl_import_param(df_file, tab, mfa_problem_input)
elif consistant_tab == 'prod':
jstab = 'dim_products'
if 'dim_products' not in mfa_problem_input:
# List of columns in "pommes-poires.xlsx" example: level, Element,
# Bilan matière ?, transport interreg,
# poids consolidation (1 par défaut), table consolidation,
# Sankey ?, Couleur
valdef = [1, '', False, False, None, None, False, '']
xl_import_tab(df_file, tab, valdef, jstab, mfa_problem_input)
elif consistant_tab == 'sect':
jstab = 'dim_sectors'
if 'dim_sectors' not in mfa_problem_input:
# List of columns in "pommes-poires.xlsx" example: level, Element,
# Bilan matière ?, transport interreg,
# poids consolidation (1 par défaut), table consolidation,
# Sankey ?, Couleur
valdef = [1, '', False, False, None, None, False, '']
xl_import_tab(df_file, tab, valdef, jstab, mfa_problem_input)
elif consistant_tab == 'data':
jstab = 'data'
if 'data' not in mfa_problem_input:
# List of columns in "pommes-poires.xlsx" example: période, Région,
# Table, Origine, Destination, Quantité, Incertitude (%),
# Contrainte 2 sigmas
valdef = ['', '', '', '', '', None, None, None, '', '', '', '']
xl_import_tab(df_file, tab, valdef, jstab, mfa_problem_input)
# valdef = ['', '', '', '', '', None, None, '', None, '', None, '']
elif consistant_tab == 'minmax':
jstab = 'min_max'
if 'min_max' not in mfa_problem_input:
# List of columns in "pommes-poires.xlsx" example: Période, Région,
# Table, Origine, Destination, min, max
valdef = ['', '', '', '', '', None, None, '', '', '', '', '']
xl_import_tab(df_file, tab, valdef, jstab, mfa_problem_input)
# valdef = ['', '', '', '', '', None, None, '', '', '', None, '']
elif consistant_tab == 'constr':
jstab = 'constraints'
if 'constraints' not in mfa_problem_input:
# List of columns in "pommes-poires.xlsx" example: id, Période, Région,
# Ressources/Emplois, Origine, Destination, eq = 0, eq <= 0, eq >= 0
valdef = [None, '', '', '', '', '', None, None, None]
xl_import_tab(df_file, tab, valdef, jstab, mfa_problem_input)
elif consistant_tab == 'flux':
if 'ter_base' not in mfa_problem_input:
xl_import_terbase(df_file, tab, mfa_problem_input)
else:
pass
return mfa_problem_input
def write_mfa_problem_output_to_excel(
output_file_name: str,
mfa_problem_input: dict,
mfa_problem_output: dict
):
with pd.ExcelWriter(output_file_name, engine='openpyxl', mode='a') as writer:
for tab_name, tab_content in mfa_problem_output.items():
sheet_content = tab_content
# We don't want to write all sub territories TER results
li_tmp = [co[1] for co in mfa_problem_input['dim_sectors']] # sectors list
sectors_names = list(OrderedDict.fromkeys(li_tmp))
write_tab = True
if 'ter' in tab_name:
excluded_list = [sect for sect in sectors_names if sect in tab_name]
if len(excluded_list) != 0:
write_tab = False
if write_tab:
if ((('ter' in tab_name) or ('flux' in tab_name)) and (len(tab_content) > 0)):
su = np.array(tab_content['supply'])
nb_rows = su.shape[0]
use = np.array(tab_content['use'])
df = pd.DataFrame(su)
df.to_excel(
writer, sheet_name=tab_name, index=False, header=False,
startrow=1, startcol=1
)
df = pd.DataFrame(use)
df.to_excel(
writer, sheet_name=tab_name, index=False, header=False,
startrow=nb_rows+3, startcol=1
)
format_excel(writer, tab_name, mfa_problem_input)
else:
df = pd.DataFrame(sheet_content)
df.to_excel(writer, sheet_name=tab_name, index=False, header=False)
def format_excel(
excel_writer: pd.ExcelWriter,
tab_name: str,
mfa_problem_input: dict
):
li_tmp = [co[1] for co in mfa_problem_input['dim_products']] # products list
products_names = list(OrderedDict.fromkeys(li_tmp))
li_tmp = [co[1] for co in mfa_problem_input['dim_sectors']] # sectors list
sectors_names = list(OrderedDict.fromkeys(li_tmp))
col_name = ''
n = len(sectors_names) + 2
while n > 0:
n, r = divmod(n - 1, 26)
col_name = chr(r + ord('A')) + col_name
mysheet = excel_writer.sheets[tab_name]
greybg = PatternFill(bgColor='C0C0C0')
greyft = Font(color='C0C0C0')
rule = CellIsRule(operator='equal', formula=['0'], font=greyft, fill=greybg)
srange = 'C3:' + col_name + str(2+len(products_names))
mysheet.conditional_formatting.add(srange, rule)
srange = 'C' + str(6+len(products_names)) + ':' + col_name + str(5+2*len(products_names))
mysheet.conditional_formatting.add(srange, rule)
def excel_proxy_to_json(
input_file: str,
upper_level_name: str
):
# _____INIT_____
df_input_file = {}
dict_tab_list = {}
proxy_input = {}
df_input_file = pd.read_excel(input_file, None)
real_tablist = [*df_input_file]
# _____CONFIG_____
consistant_tab_list = ['geo', 'proxy', 'pflow', 'psect', 'data']
# keeping only consistent sheets
for real_tab in real_tablist:
consistent_sheet = consistantSheetName(real_tab)
if consistent_sheet in consistant_tab_list:
# real_tab is consistent
dict_tab_list[consistent_sheet] = real_tab
# _____FILL_____
df_input_data = df_input_file[dict_tab_list['data']]
mask = df_input_data['region'] == upper_level_name
input_data = np.array(df_input_data[mask].values)
proxy_input['data'] = input_data
proxy_input['proxis_flows'] = np.array(df_input_file[dict_tab_list['pflow']].values)
proxy_input['proxis_sectors'] = np.array(df_input_file[dict_tab_list['psect']].values)
regions = np.array(df_input_file[dict_tab_list['geo']].values)
proxy_input['regions'] = [r[0] for r in regions]
proxy_input['main_reg'] = upper_level_name
proxy_input['years'] = [input_data[0, 0]]
proxis = np.array(df_input_file[dict_tab_list['proxy']].values)
proxy_input['proxis'] = proxis[:, [0, 1, 2]]
# building data_ps (used for proxis sectors)
data_ps = np.append(input_data[:, 0:1].astype(str), input_data[:, 1:], axis=1)
ps = np.array(()).reshape((0, 2))
for r in input_data:
if r[2] in ['R', 'r', 'S', 's']:
ps = np.append(ps, np.array([[r[4], r[3]]]), axis=0)
else:
ps = np.append(ps, np.array([[r[3], r[4]]]), axis=0)
proxy_input['data_ps'] = np.append(data_ps, ps, axis=1)
proxy_input['headers'] = [
'period', 'region', 'table', 'origin', 'destination', 'value',
'uncert', 'constraint', 'quantity', 'unit', 'factor', 'source', 'product', 'sector'
]
return proxy_input
def write_proxy_output_in_excel(
input_file: str,
headers: list,
sheet_name: str,
proxy_output # array with proxy output results
):
try:
act_xl = load_workbook(input_file)
with pd.ExcelWriter(input_file, engine='openpyxl') as writer:
df_data = pd.DataFrame(proxy_output, columns=headers[:12])
writer.book = act_xl
df_data.to_excel(writer, sheet_name=sheet_name, index=False)
return True
except Exception as expt:
su_trace.logger.info('Exception ! Message : ' + str(expt))
return False | PypiClean |
/FiberPhotometryDataAnalysis-0.0.9.tar.gz/FiberPhotometryDataAnalysis-0.0.9/ci/bootstrap.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import subprocess
import sys
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join
base_path = dirname(dirname(abspath(__file__)))
def check_call(args):
print("+", *args)
subprocess.check_call(args)
def exec_in_env():
env_path = join(base_path, ".tox", "bootstrap")
if sys.platform == "win32":
bin_path = join(env_path, "Scripts")
else:
bin_path = join(env_path, "bin")
if not exists(env_path):
import subprocess
print("Making bootstrap env in: {0} ...".format(env_path))
try:
check_call([sys.executable, "-m", "venv", env_path])
except subprocess.CalledProcessError:
try:
check_call([sys.executable, "-m", "virtualenv", env_path])
except subprocess.CalledProcessError:
check_call(["virtualenv", env_path])
print("Installing `jinja2` into bootstrap environment...")
check_call([join(bin_path, "pip"), "install", "jinja2", "tox"])
python_executable = join(bin_path, "python")
if not os.path.exists(python_executable):
python_executable += '.exe'
print("Re-executing with: {0}".format(python_executable))
print("+ exec", python_executable, __file__, "--no-env")
os.execv(python_executable, [python_executable, __file__, "--no-env"])
def main():
import jinja2
print("Project path: {0}".format(base_path))
jinja = jinja2.Environment(
loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")),
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True
)
tox_environments = [
line.strip()
# 'tox' need not be installed globally, but must be importable
# by the Python that is running this script.
# This uses sys.executable the same way that the call in
# cookiecutter-pylibrary/hooks/post_gen_project.py
# invokes this bootstrap.py itself.
for line in subprocess.check_output([sys.executable, '-m', 'tox', '--listenvs'], universal_newlines=True).splitlines()
]
tox_environments = [line for line in tox_environments if line.startswith('py')]
for name in os.listdir(join("ci", "templates")):
with open(join(base_path, name), "w") as fh:
fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
print("Wrote {}".format(name))
print("DONE.")
if __name__ == "__main__":
args = sys.argv[1:]
if args == ["--no-env"]:
main()
elif not args:
exec_in_env()
else:
print("Unexpected arguments {0}".format(args), file=sys.stderr)
sys.exit(1) | PypiClean |
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/testing/xcomp.py | import xml.dom.minidom as dom
from django.utils.six import string_types
# define node types
ELEMENT_NODE = dom.Element.ELEMENT_NODE
ATTRIBUTE_NODE = dom.Element.ATTRIBUTE_NODE
TEXT_NODE = dom.Element.TEXT_NODE
CDATA_SECTION_NODE = dom.Element.CDATA_SECTION_NODE
ENTITY_REFERENCE_NODE = dom.Element.ENTITY_REFERENCE_NODE
ENTITY_NODE = dom.Element.ENTITY_NODE
PROCESSING_INSTRUCTION_NODE = dom.Element.PROCESSING_INSTRUCTION_NODE
COMMENT_NODE = dom.Element.COMMENT_NODE
DOCUMENT_NODE = dom.Element.DOCUMENT_NODE
DOCUMENT_TYPE_NODE = dom.Element.DOCUMENT_TYPE_NODE
DOCUMENT_FRAGMENT_NODE = dom.Element.DOCUMENT_FRAGMENT_NODE
NOTATION_NODE = dom.Element.NOTATION_NODE
# define note type to string conversion
NODE_DICT = {
ELEMENT_NODE : "ELEMENT_NODE",
ATTRIBUTE_NODE : "ATTRIBUTE_NODE",
TEXT_NODE : "TEXT_NODE",
CDATA_SECTION_NODE : "CDATA_SECTION_NODE",
ENTITY_REFERENCE_NODE : "ENTITY_REFERENCE_NODE",
ENTITY_NODE : "ENTITY_NODE",
PROCESSING_INSTRUCTION_NODE : "PROCESSING_INSTRUCTION_NODE",
COMMENT_NODE : "COMMENT_NODE",
DOCUMENT_NODE : "DOCUMENT_NODE",
DOCUMENT_TYPE_NODE : "DOCUMENT_TYPE_NODE",
DOCUMENT_FRAGMENT_NODE : "DOCUMENT_FRAGMENT_NODE",
NOTATION_NODE : "NOTATION_NODE",
}
# exceptions
class XMLError( Exception ) :
""" XML base error error """
class XMLParseError( XMLError ) :
""" XML parse error """
class XMLMismatchError( XMLError ) :
""" XML mismatch error """
#-------------------------------------------------------------------------------
# low level utilities
def _getNodeName( node ) :
""" get full node name in '{namespace}tagName' format """
if ( node.namespaceURI is None ) :
return node.nodeName
else :
return "{%s}%s"%( node.namespaceURI , node.localName )
def _packName( pair ) :
""" pack the (<namespace>,<localname>) tuple to curly bracket notation
{<namespace>}<localname> """
if ( pair[0] is None ) :
return pair[1]
else :
return "{%s}%s"%(pair[0],pair[1])
def _skipIgnorable( node , path ) :
""" get node sibling skipping empty text nodes and comments """
while ( node is not None ) :
# expected nodes - return immediatelly
if node.nodeType in (ELEMENT_NODE,CDATA_SECTION_NODE): break
# special treatment of text nodes - ignore blank text
if node.nodeType == TEXT_NODE :
# ignore blank text
if 0 < len( node.wholeText.strip() ) : break
# unexpected nodes - raise exception
if node.nodeType in (ATTRIBUTE_NODE,DOCUMENT_NODE,DOCUMENT_FRAGMENT_NODE,
NOTATION_NODE,ENTITY_REFERENCE_NODE,ENTITY_NODE,DOCUMENT_TYPE_NODE):
raise XMLParseError("Unexpected child node '%s' ! PATH='%s'" % (NODE_DICT[node.nodeType],path))
# the rest is just ignored
#if node.nodeType in (COMMENT_NODE,PROCESSING_INSTRUCTION_NODE) : pass
node = node.nextSibling
return node
def _compareAttributes( a0 , a1 , level , path , verbose = False ) :
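""" compare the attributes of two nodes, ignoring namespace declarations;
raise XMLMismatchError when they differ """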
# both nodes have no attributes
if ( a0 is None ) and ( a1 is None ) : return
#attribute mismatch
if ( a0 is None ) or ( a1 is None ) :
raise XMLMismatchError("Attribute mismatch! PATH=\"%s\""%path)
# get list of attributes and filter-out namespace definitions
isNotNS = lambda v : ( v[0][0] != "http://www.w3.org/2000/xmlns/" )
packName = lambda v : ( _packName(v[0]) , v[1].strip() )
items0 = sorted( map( packName , filter( isNotNS , a0.itemsNS() ) ) )
items1 = sorted( map( packName , filter( isNotNS , a1.itemsNS() ) ) )
if len( items0 ) != len( items1 ) :
if verbose :
for item in items0 :
print (" < \t %s@%s=\"%s\"" %( path , item[0] , item[1] ))
for item in items1 :
print (" > \t %s@%s=\"%s\"" %( path , item[0] , item[1] ))
raise XMLMismatchError("Attribute count mismatch! PATH=\"%s\""%path)
for pair in zip( items0 , items1 ) :
if verbose :
print (" < \t %s@%s=\"%s\"" %( path , pair[0][0] , pair[0][1] ))
print (" > \t %s@%s=\"%s\"" %( path , pair[1][0] , pair[1][1] ))
if ( pair[0] != pair[1]) :
raise XMLMismatchError("Attribute mismatch! PATH=\"%s\""%path)
def _compareNode( n0 , n1 , level = 0 , path = "/" , verbose = False ) :
""" compare DOM node or element subtree """
#nn0 , nn1 = _getNodeName( n0 ), _getNodeName( n1 )
nn0 , nn1 = n0.nodeName, n1.nodeName
path0 = "%s/%s"%( path , nn0 ) if level > 1 else "/%s"%nn0 if level == 1 else _getNodeName( n0 )
path1 = "%s/%s"%( path , nn1 ) if level > 1 else "/%s"%nn1 if level == 1 else _getNodeName( n0 )
if verbose :
print ("< \t %s" %( path0 ))
print ("> \t %s" %( path1 ))
# compare node name and node type
if (( n0.nodeType != n1.nodeType )
or ( _getNodeName( n0 ) != _getNodeName( n1 ) )):
raise XMLMismatchError("Node mismatch! PATH0=\"%s\" vs. PATH1=\"%s\""%(path0,path1))
# compare attributes
_compareAttributes( n0.attributes , n1.attributes , level , path0 , verbose )
# in case of text-nodes and CDATA section check the content
if n0.nodeType == TEXT_NODE :
if verbose :
print (" < TEXT: \t \"%s\"" % n0.wholeText.strip())
print (" > TEXT: \t \"%s\"" % n1.wholeText.strip())
if n0.wholeText.strip() != n1.wholeText.strip() :
raise XMLMismatchError("Text mismatch! PATH=\"%s\""%(path))
return
if n0.nodeType == CDATA_SECTION_NODE :
if verbose :
print (" < CDATA: \t \"%s\"" % n0.wholeText)
print (" > CDATA: \t \"%s\"" % n1.wholeText)
if n0.wholeText != n1.wholeText :
raise XMLMismatchError("CDATA mismatch! PATH=\"%s\""%(path))
return
# get first child
nn0 = _skipIgnorable( n1.firstChild , path )
nn1 = _skipIgnorable( n0.firstChild , path )
while ( nn0 is not None ) and ( nn1 is not None ) :
# sublevel comparison
_compareNode( nn0 , nn1 , level+1 , path0 , verbose )
#get next sibling
nn0 = _skipIgnorable( nn0.nextSibling , path )
nn1 = _skipIgnorable( nn1.nextSibling , path )
# make sure there are no remaining nodes
if not (( nn0 is None ) and ( nn1 is None )) :
raise XMLMismatchError("Childern count mismatch! PATH=\"%s\""%path0)
#-------------------------------------------------------------------------------
def xmlCompareDOMs( xml0 , xml1 , verbose = False ) :
""" Compare two XML documents passed as DOM trees (xml.dom.minidom)."""
return _compareNode( xml0 , xml1 , verbose = verbose )
def xmlCompareStrings( str0 , str1 , verbose = False ) :
""" Compare two XML documents passed as strings. """
def parse( src , label ) :
try :
return dom.parseString( src )
except Exception as e :
raise XMLParseError("Failed to parse %s XML string! %s" % ( label , str(e) ))
return xmlCompareDOMs( parse(str0,"the first") , parse(str1,"the second") , verbose )
def xmlCompareFiles( src0 , src1 , verbose = False ) :
""" Compare two XML documents passed as filenames, file or file-like objects."""
def parseFileName( src ) :
try :
with open( src ) as fid :
return dom.parse( fid )
except Exception as e :
raise XMLParseError("Failed to parse the \"%s\" file! %s" % ( src , str(e) ))
def parseFileObj( src , label ) :
try :
return dom.parse( src )
except Exception as e :
raise XMLParseError("Failed to parse the %s XML file(-like) object! %e" % ( label , str(e) ))
def parse( src , label ) :
return parseFileName( src ) if ( type(src) in string_types ) else parseFileObj( src , label )
return xmlCompareDOMs( parse(src0,"the first") , parse(src1,"the second") , verbose )
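# usage sketch (file names are hypothetical):
#   try:
#       xmlCompareFiles("expected.xml", "actual.xml")
#   except XMLMismatchError as e:
#       print("XML documents differ: %s" % e)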
#------------------------------------------------------------------------------- | PypiClean |
/MapProxy-1.16.0.tar.gz/MapProxy-1.16.0/README.rst | MapProxy is an open source proxy for geospatial data. It caches, accelerates and transforms data from existing map services and serves any desktop or web GIS client.
.. image:: https://mapproxy.org/mapproxy.png
MapProxy is a tile cache, but also offers many new and innovative features like full support for WMS clients.
MapProxy is released under the Apache Software License 2.0, runs on Unix/Linux and Windows and is easy to install and to configure.
Go to https://mapproxy.org/ for more information.
The latest documentation is available at: http://mapproxy.github.io/mapproxy/
Older documentation is available at: https://mapproxy.org/documentation
| PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/BackgroundIframe.js.uncompressed.js | define("dijit/BackgroundIframe", [
"require", // require.toUrl
".", // to export dijit.BackgroundIframe
"dojo/_base/config",
"dojo/dom-construct", // domConstruct.create
"dojo/dom-style", // domStyle.set
"dojo/_base/lang", // lang.extend lang.hitch
"dojo/on",
"dojo/_base/sniff", // has("ie"), has("mozilla"), has("quirks")
"dojo/_base/window" // win.doc.createElement
], function(require, dijit, config, domConstruct, domStyle, lang, on, has, win){
// module:
// dijit/BackgroundIFrame
// summary:
// new dijit.BackgroundIframe(node)
// Makes a background iframe as a child of node, that fills
// area (and position) of node
// TODO: remove _frames, it isn't being used much, since popups never release their
// iframes (see [22236])
var _frames = new function(){
// summary:
// cache of iframes
var queue = [];
this.pop = function(){
var iframe;
if(queue.length){
iframe = queue.pop();
iframe.style.display="";
}else{
if(has("ie") < 9){
var burl = config["dojoBlankHtmlUrl"] || require.toUrl("dojo/resources/blank.html") || "javascript:\"\"";
var html="<iframe src='" + burl + "' role='presentation'"
+ " style='position: absolute; left: 0px; top: 0px;"
+ "z-index: -1; filter:Alpha(Opacity=\"0\");'>";
iframe = win.doc.createElement(html);
}else{
iframe = domConstruct.create("iframe");
iframe.src = 'javascript:""';
iframe.className = "dijitBackgroundIframe";
iframe.setAttribute("role", "presentation");
domStyle.set(iframe, "opacity", 0.1);
}
iframe.tabIndex = -1; // Magic to prevent iframe from getting focus on tab keypress - as style didn't work.
}
return iframe;
};
this.push = function(iframe){
iframe.style.display="none";
queue.push(iframe);
}
}();
dijit.BackgroundIframe = function(/*DomNode*/ node){
// summary:
// For IE/FF z-index schenanigans. id attribute is required.
//
// description:
// new dijit.BackgroundIframe(node)
// Makes a background iframe as a child of node, that fills
// area (and position) of node
if(!node.id){ throw new Error("no id"); }
if(has("ie") || has("mozilla")){
var iframe = (this.iframe = _frames.pop());
node.appendChild(iframe);
if(has("ie")<7 || has("quirks")){
this.resize(node);
this._conn = on(node, 'resize', lang.hitch(this, function(){
this.resize(node);
}));
}else{
domStyle.set(iframe, {
width: '100%',
height: '100%'
});
}
}
};
lang.extend(dijit.BackgroundIframe, {
resize: function(node){
// summary:
// Resize the iframe so it's the same size as node.
// Needed on IE6 and IE/quirks because height:100% doesn't work right.
if(this.iframe){
domStyle.set(this.iframe, {
width: node.offsetWidth + 'px',
height: node.offsetHeight + 'px'
});
}
},
destroy: function(){
// summary:
// destroy the iframe
if(this._conn){
this._conn.remove();
this._conn = null;
}
if(this.iframe){
_frames.push(this.iframe);
delete this.iframe;
}
}
});
return dijit.BackgroundIframe;
}); | PypiClean |
/ENPC-Aligner-1.0.5.tar.gz/ENPC-Aligner-1.0.5/README.md | # Library for aligning texts written in different languages
# Theory
The poster presenting the methods implemented in this library is [here](/doc/theory.pdf).
# Installing the library
Run in a terminal:
`sudo pip install ENPC-Aligner`
# Testing the library
Run in a terminal:
`align-example`
## Source texts
* Bible
* fr : http://godieu.com/doc/telechargement.html (Bible Jean Frédéric Ostervald 1996)
* en : http://www.truth.info/download/bible.htm (King James Bible also known as the Authorised Version)
* Le petit prince
* fr : http://lepetitprinceexupery.free.fr/telecharger/le-petit-prince--antoine-de-saint-exupery.txt
* en : https://www.odaha.com/antoine-de-saint-exupery/maly-princ/the-little-prince
In 20 languages: http://www.malyksiaze.net/us/ksiazka
* Pinocchio:
* fr : https://www.ebooksgratuits.com/pdf/collodi_pinocchio.pdf
* en : http://www.gutenberg.org/files/500/500-0.txt
## Development
```
sudo pip install -e .
```
This creates a symbolic link in site-packages to the repo so that source modifications take effect immediately.
## Publish to PyPi
Mettre dans ~/.pypirc
```
[distutils]
index-servers =
pypi
[pypi]
repository: https://pypi.org/project/ENPC-Aligner/
username: <username>
password: <password>
```
Modifier le numéro de version dans *enpc_aligner/version.py* et lancer
```
python setup.py sdist upload -r pypi
```
| PypiClean |
/MezzanineFor1.7-3.1.10.tar.gz/MezzanineFor1.7-3.1.10/mezzanine/forms/migrations/south/0001_initial.py | import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = [
('pages', '0001_initial'),
]
def forwards(self, orm):
# Adding model 'Form'
db.create_table('forms_form', (
('email_message', self.gf('django.db.models.fields.TextField')(blank=True)),
('page_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['pages.Page'], unique=True, primary_key=True)),
('email_copies', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('button_text', self.gf('django.db.models.fields.CharField')(default=u'Submit', max_length=50)),
('response', self.gf('mezzanine.core.fields.HtmlField')()),
('content', self.gf('mezzanine.core.fields.HtmlField')()),
('send_email', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('email_subject', self.gf('django.db.models.fields.CharField')(max_length=200, blank=True)),
('email_from', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
))
db.send_create_signal('forms', ['Form'])
# Adding model 'Field'
db.create_table('forms_field', (
('field_type', self.gf('django.db.models.fields.CharField')(max_length=55)),
('_order', self.gf('django.db.models.fields.IntegerField')(null=True)),
('form', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fields', to=orm['forms.Form'])),
('default', self.gf('django.db.models.fields.CharField')(max_length=2000, blank=True)),
('required', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('label', self.gf('django.db.models.fields.CharField')(max_length=200)),
('visible', self.gf('django.db.models.fields.BooleanField')(default=True, blank=True)),
('help_text', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
('choices', self.gf('django.db.models.fields.CharField')(max_length=1000, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('forms', ['Field'])
# Adding model 'FormEntry'
db.create_table('forms_formentry', (
('entry_time', self.gf('django.db.models.fields.DateTimeField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('form', self.gf('django.db.models.fields.related.ForeignKey')(related_name='entries', to=orm['forms.Form'])),
))
db.send_create_signal('forms', ['FormEntry'])
# Adding model 'FieldEntry'
db.create_table('forms_fieldentry', (
('entry', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fields', to=orm['forms.FormEntry'])),
('field_id', self.gf('django.db.models.fields.IntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.CharField')(max_length=2000)),
))
db.send_create_signal('forms', ['FieldEntry'])
def backwards(self, orm):
# Deleting model 'Form'
db.delete_table('forms_form')
# Deleting model 'Field'
db.delete_table('forms_field')
# Deleting model 'FormEntry'
db.delete_table('forms_formentry')
# Deleting model 'FieldEntry'
db.delete_table('forms_fieldentry')
models = {
'core.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forms.field': {
'Meta': {'object_name': 'Field'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': "orm['forms.Form']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'forms.fieldentry': {
'Meta': {'object_name': 'FieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': "orm['forms.FormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000'})
},
'forms.form': {
'Meta': {'object_name': 'Form', '_ormbases': ['pages.Page']},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'content': ('mezzanine.core.fields.HtmlField', [], {}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('mezzanine.core.fields.HtmlField', [], {}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'forms.formentry': {
'Meta': {'object_name': 'FormEntry'},
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': "orm['forms.Form']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'pages.page': {
'Meta': {'object_name': 'Page'},
'_keywords': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('mezzanine.core.fields.HtmlField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'keywords': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Keyword']", 'symmetrical': 'False', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
}
}
complete_apps = ['forms'] | PypiClean |
/EARL-pytorch-0.5.1.tar.gz/EARL-pytorch-0.5.1/rlgym_tools/replay_converter.py | import logging
from collections import Counter
from typing import Union
import carball as cb
import numpy as np
from carball.analysis.analysis_manager import AnalysisManager
from carball.controls.controls import ControlsCreator
from rlgym.utils import math
from rlgym.utils.common_values import ORANGE_TEAM, BLUE_TEAM, BOOST_LOCATIONS
from rlgym.utils.gamestates import GameState, PhysicsObject, PlayerData
boost_locations = np.array(BOOST_LOCATIONS) # Need ndarray for speed
_invert = np.array((-1, -1, 1))
_rot_correct = np.array((-1, 1, -1))
def convert_replay(replay: Union[str, AnalysisManager]):
if isinstance(replay, str):
replay = cb.analyze_replay_file(replay, logging_level=logging.CRITICAL)
ControlsCreator().get_controls(replay.game)
boost_timers = np.zeros(34)
demo_timers = np.zeros(len(replay.game.players))
blue_goals = 0
orange_goals = 0
goals = list(replay.game.goals)[::-1]
touches = list(replay.protobuf_game.game_stats.hits)[::-1]
demos = list(replay.game.demos)[::-1]
match_goals = Counter()
match_saves = Counter()
match_shots = Counter()
match_demos = Counter()
match_boost_pickups = Counter()
boost_amounts = {}
last_locations = {}
player_pos_pyr_vel_angvel_boost_controls = { # Preload useful arrays so we can fetch by index later
player.online_id: (
player.data[["pos_x", "pos_y", "pos_z"]].values.astype(float),
player.data[["rot_x", "rot_y", "rot_z"]].fillna(0).values.astype(float) * _rot_correct,
player.data[["vel_x", "vel_y", "vel_z"]].fillna(0).values.astype(float) / 10,
player.data[["ang_vel_x", "ang_vel_y", "ang_vel_z"]].fillna(0).values.astype(float) / 1000,
player.data["boost"].fillna(0).astype(float) / 255,
player.controls[["throttle", "steer", "pitch", "yaw", "roll",
"jump", "boost", "handbrake"]].fillna(0).values.astype(float),
)
for player in replay.game.players
}
ball_pos_pyr_vel_angvel = (
replay.game.ball[["pos_x", "pos_y", "pos_z"]].values.astype(float),
replay.game.ball[["rot_x", "rot_y", "rot_z"]].fillna(0).values.astype(float) * _rot_correct,
replay.game.ball[["vel_x", "vel_y", "vel_z"]].fillna(0).values.astype(float) / 10,
replay.game.ball[["ang_vel_x", "ang_vel_y", "ang_vel_z"]].fillna(0).values.astype(float) / 1000,
)
rallies = []
for kf1, kf2 in zip(replay.game.kickoff_frames, replay.game.kickoff_frames[1:] + [replay.game.frames.index[-1]]):
for goal in replay.game.goals:
if kf1 < goal.frame_number < kf2:
rallies.append((kf1, goal.frame_number))
break
else: # No goal between kickoffs
rallies.append((kf1, kf2))
last_frame = 0
for i, (frame, ball_row) in enumerate(replay.game.ball.iterrows()):
for start, end in rallies:
if start <= frame < end:
# del rallies[0]
break
else:
continue
state = GameState()
# game_type
state.game_type = -1
# blue_score/orange_score
if len(goals) > 0 and goals[-1].frame_number <= frame:
goal = goals.pop()
match_goals[goal.player.online_id] += 1
if goal.player_team == 0:
blue_goals += 1
else:
orange_goals += 1
state.blue_score = blue_goals
state.orange_score = orange_goals
# last_touch
touched = set()
while len(touches) > 0 and touches[-1].frame_number <= frame:
touch = touches.pop()
p_id = touch.player_id.id
state.last_touch = p_id
touched.add(p_id)
if touch.save:
match_saves[p_id] += 1
if touch.shot:
match_shots[p_id] += 1
# demos for players
demoed = set()
while len(demos) > 0 and demos[-1]["frame_number"] <= frame:
demo = demos.pop()
attacker = demo["attacker"].online_id
victim = demo["victim"].online_id
match_demos[attacker] += 1
demoed.add(victim)
# players
actions = []
for n, player in enumerate(replay.game.players):
player_data = PlayerData()
if player.online_id in demoed:
demo_timers[n] = 3
player_data.car_id = player.online_id
player_data.team_num = ORANGE_TEAM if player.team.is_orange else BLUE_TEAM
player_data.match_goals = match_goals[player.online_id]
player_data.match_saves = match_saves[player.online_id]
player_data.match_shots = match_shots[player.online_id]
player_data.match_demolishes = match_demos[player.online_id]
player_data.boost_pickups = match_boost_pickups[player.online_id]
player_data.is_demoed = demo_timers[n] > 0
player_data.on_ground = None # Undefined
player_data.ball_touched = player.online_id in touched
player_data.has_flip = None # Undefined, TODO use jump_active, double_jump_active and dodge_active?
pos, pyr, vel, ang_vel, boost, controls = (v[i] for v in
player_pos_pyr_vel_angvel_boost_controls[player.online_id])
player_data.boost_amount = boost
if np.isnan(pos).any():
pos = last_locations[player.online_id]
else:
last_locations[player.online_id] = pos
player_data.car_data = PhysicsObject(
position=pos,
quaternion=math.rotation_to_quaternion(math.euler_to_rotation(pyr)),
linear_velocity=vel,
angular_velocity=ang_vel
)
player_data.inverted_car_data = PhysicsObject(
position=pos * _invert,
quaternion=math.rotation_to_quaternion((math.euler_to_rotation(pyr).T * _invert).T),
linear_velocity=vel * _invert,
angular_velocity=ang_vel * _invert
)
old_boost = boost_amounts.get(player.online_id, float("inf"))
boost_change = boost - old_boost
boost_amounts[player.online_id] = player_data.boost_amount
if boost_change > 0 and not (old_boost == 0 and boost == 85 / 255): # Ignore boost gains on spawn
closest_boost = np.linalg.norm(boost_locations - pos, axis=-1).argmin()
if boost_locations[closest_boost][1] > 72:
boost_timers[closest_boost] = 10
else:
boost_timers[closest_boost] = 4
match_boost_pickups[player.online_id] += 1
state.players.append(player_data)
actions.append(controls)
# ball
pos, pyr, vel, ang_vel = (v[i] for v in ball_pos_pyr_vel_angvel)
if np.isnan(pos).any():
continue # Goal scored, go next
state.ball = PhysicsObject(
position=pos,
quaternion=math.rotation_to_quaternion(math.euler_to_rotation(pyr)),
linear_velocity=vel,
angular_velocity=ang_vel
)
# inverted_ball
state.inverted_ball = PhysicsObject(
position=pos * _invert,
quaternion=math.rotation_to_quaternion((math.euler_to_rotation(pyr).T * _invert).T),
linear_velocity=vel * _invert,
angular_velocity=ang_vel * _invert
)
# boost_pads
state.boost_pads = (boost_timers == 0) * 1
# inverted_boost_pads
state.inverted_boost_pads = state.boost_pads[::-1]
d_time = (frame - last_frame) / 30 # Maybe use time delta from replay instead?
boost_timers -= d_time # Should this be before or after values are set?
demo_timers -= d_time
boost_timers[boost_timers < 0] = 0
demo_timers[demo_timers < 0] = 0
last_frame = frame
state.players.sort(key=lambda p: (p.team_num, p.car_id))
yield state, actions | PypiClean |
/Jalapeno-Lite-0.1.3.tar.gz/Jalapeno-Lite-0.1.3/Jalapeno_data/Sites/first/Pages/blog/10.md | title: Python爬虫: 带你上车之爬取妹子图
date: 2016-09-27
tag: Python爬虫
[TOC]
<!--Sidebar-->
##简介
30行python轻松爬取成百上千的妹子图到本地。没时间解释了,快上车。
###什么是爬虫?
网络爬虫,顾名思义就是在网上爬来爬去的“虫子”,它能够按照一定规则自动抓取网络数据的脚本。比如说你找到了一个特别棒的网站,上面全是妹子图。而你想把它们存到你的随身硬盘当中。如果你要一张一张保存的话那需要比较持久的耐力,这个时候你就需要通过爬虫来帮你抓取你心心念念的妹子图。
那么如何通过爬虫来完成任务呢?
###运行机制
其实爬虫的工作流程和人是一样的,都需要经过下面几个步骤:
> 使用本机的IP连接到网络 ->使用地址登入网站 ->看到网页内容 ->筛选需要的信息 -> 保存下载 -> 登入新网页 ->重复之前的动作
是不是非常相似?
###为什么使用python
很多编程语言都可以写爬虫,可我们为什么选择python呢?总的来说就是四个字:**简单够用**:
- Python语法简单,开发效率高
- Python 有着丰富第三方爬虫工具库(requests,scrapy,BeautifulSoup)
- 爬虫的速度瓶颈大多是在网络阻塞上,非超大规模爬取很少遇到计算性能瓶颈
- Python起初被用来开发搜索引擎,所以关于爬虫的资料很多,社区活跃
让我们开始吧!
<!--More-->
首先先创建一个后缀为.py的python文件(名字自己想.py)
##工具准备
由于这次只是一个简单的小项目,我们并不需要使用第三方库,我们需要的只有python3
- Python3
- urllib.request
熟悉python2的对urllib库一定不陌生,我们要用的是其中的urlopen方法
- re(正则表达式)
正则表达式是根据一定规则来匹配相应字符串,从网页中提取我们需要的内容
- time 设定休眠时间,减慢爬取速度,减少对方服务器的压力
- gzip 对于那些使用网页压缩技术的网站,我们需要将它解压
来看我们第一段代码,在我们的文件开头导入需要的工具
import urllib.request
import re
import time
import gzip
接下来我们就需要使用urllib库来登入网站
##使用urllib读取网页内容
为了准备这个教程,我不(hou)辞(yan)劳(wu)苦(chi)地找来了[优妹子](http://www.youmzi.com)来作为我们今天要爬的网站。(真的是为了教学),在下载妹子的图片之前,我们需要先分析通过网站的源代码来找出我们需要的图片链接。可能你没有学过HTML,看不懂网页的源代码,但是没关系,我们要做的事情有一半浏览器替我们做了,剩下的一半就是找!规
!律!
我们知道爬虫会增加对方服务器的压力,有的时候如果对方发现你使用的爬虫而不是用户的话,就会切断连接导致爬取中断(如果没有断点续传功能就等于失败), 所以我们需要将我们的爬虫看起来更像用户一样。当然爬虫和反爬虫这里的内容太多这里不会做过多讲解,在这里我们需要给我们的爬虫添加header的信息,因为有些服务器会对请求的header做检查:
header = {'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
是不是很多东西很眼熟?对了,我们发送这段请求让服务器知道我们是一个用户在使用Windows NT 6.1(就是win7) 的Firefox浏览器来浏览网页,为了不让代码看起来特别乱,我们先将它保存在一个变量中,
接着我们要把网站使用字符串的形式保存在变量url中:
url = "http://www.youmzi.com"
使用urllib.request的Request方法来向对方服务器发送请求,格式为(网址,[参数..]),将我们的header变量作为headers参数传递进去。
requests = urllib.request.Request(url,headers = header)
接着使用urlopen方法打开网页(刚才请求返回的结果)
opened = urllib.request.urlopen(requests)
读取我们的网页内容(源代码)并解码
content = opened.read().decode('gbk')
这里我们使用read()方法来进行读取并在后面添加decode方法对输出结果进行解码,不同网页使用不同的编码标准,一般来说使用utf8格式,但是我们在源代码的前几行发现写着<meta charset = 'gb2312'>,这是不一样的编码方式。但是当我们使用 decode('gb2312')并不管用。你灵机一动,想到了GBK,这是一种非常常用的中文编码格式,于是就有了上面那行代码
这个时候你再试图print出来content的内容,得到的就是网页源代码,当你使用浏览器的时候,右键点击网页也会出来检查源代码的选项。这就是我们获取的内容,说明你已经成功连接到了网站
但是这一堆乱七八糟的字符让我怎么找到妹子图呢
别着急,我们要进行非常重要的步骤,网页解析
##使用正则表达式
###正则表达式简介
[正则表达式](http://www.runoob.com/regexp/regexp-syntax.html)是一种使用特定字符来匹配字符串的模式,它可以让我们在不确定内容的情况下进行模糊匹配。
正则表达式的语法内容很多,如果想要了解更多请点击前面的链接或自行搜索。但是本着”一招在手,天下我有“的精神,我们使用经典的“.\*?"来进行匹配。你可能猛一看这是什么鬼,这可是我们找到妹子图的关键法宝,其中:
- '.' 代表了任意字符
- '\*' 代表了匹配无限次
- '?' 代表了使用非贪婪模式,尽可能少的进行匹配
- () 有括号的会被提取,无括号的只会被用来匹配不会提取
举个栗子,在'<fdakdhaf>内容<dakflahf>'这个字符串当中我们只需要匹配开头,结尾,内容两边的标志,并且使用括号标志我们需要提取的内容就可以了。
<.*?>(.*?)<.*?>
变成人话就是
<管他是什么>管他是什么我要了<管他是什么>
###构建我们的表达式
怎么样很简单吧,现在我们就需要对网页源代码进行解析,回到浏览器,右键点击一张妹子图,然后点检查(chrome)/审查元素(Safari)。你会看到一个窗口显示网页的源代码,高亮的部分是所选内容的代码,将鼠标移动到不同的代码上,网页中会用阴影部分表示出你当前代码所展示的内容,我们来右键点击检查一张图片:
<img src="http://ymz.qqwmb.com/allimg/c160926/14JY6111Q560-L3G6_lit.jpg" border="0" width="160" alt="美媛馆 [MyGirl] 2016.09.12 VOL.225 xxxxxx">
其中jpg所标记的那个链接就是我们要的链接,但是我们不能只用双引号匹配,因为双引号内包含的内容不只有链接,所以我们尽量多描述一点来让我们的匹配更加精准。
<img src="(.*?)".*?>
这样就好了嘛,还没有。img是图片标签,网站上那么多图片,你不能把网站的广告logo什么都抓下来吧,这时候你就需要移动你的鼠标找规律,在保持单个完整性的同时多向外部拓展一层,你匹配的就更准确。比如现在在img标签,外面有个a标签,鼠标放上去也指向图片,a标签外面是li标签,还是指向图片,li外面是div标签,还是..不,这次指向很多图片了,所以我们应该使用图片外面的li标签。我们来看代码
<li><a href="http://www.youmzi.com/12255.html" title="尤果网 UGirls Vol.205 香川颖 日系美女" target="_blank"><img src="http://ymz.qqwmb.com/allimg/c160922/14J54TECK0-c4X8_lit.jpg" border="0" width="160" alt="尤果网 UGirls Vol.205 香川颖 日系美女" /></a><p><a href="http://www.youmzi.com/12255.html" title="尤果网 UGirls Vol.205 香川颖 日系美女" target="_blank"> 尤果网 UGirls Vol.205 </a> </p></li>
头都大了,这什么啊。不要惊慌,我们发现又一个规律:除了img标签外,a,li,p标签都是
<li><a></a><p></p></li>
这个样子的,有头有尾。这样以来我们就找到头,尾和我们要的内容,然后把其他的模糊匹配掉,得到了
<li>.*?<img src="(.*?)".*?</li>
正则表达式就是这么神奇。
###调用re模块
有了表达式,我们就需要使用开头导入的re模块来进行解析,首先用re.compile把解析方法存入变量:
repattern = re.compile(r'<li>.*?<img src="(.*?)".*?</li>',re.S)
接着使用re.findall来根据方法从源代码提取出来需要的内容
girls_link = re.findall(repattern,content)
其中repattern是方法,content是我们刚刚得到的源代码,这个时候re.findall会把所有匹配到的内容放到一个列表当中并且储存到girls_link这个变量:
[妹子图链接1,妹子图链接2 ,........]
到目前为止,我们已经可以找到这一页中所有妹子图的链接了,接下来我们需要储存到本地。
##储存到本地
储存的过程就很简单了,由于我们有多个链接,我们需要使用for循环来遍历列表里的所有链接。
#!python
#文件名计数器
girl = 0
for each in girls_link:
#创建文件,文件名从零开始的数字,格式为jpg,写入方法为'wb'二进制写入
a = open(str(girl)+'.jpg','wb')
#使用urllib访问网页并读取内容
b = urllib.request.Request(each,headers =header)
c = urllib.request.urlopen(b)
e = c.read()
#将内容写入文件
a.write(e)
print("No. %d Girl downloaded"%girl)
#计数器+1,进行下一次
girl += 1
#暂停一秒钟,人为降低速度
time.sleep(1)
这样你就可以发现和你的.py文件一起突然多出了好多图片文件,程序默认把内容保存到当前目录下。注意在上面的循环中我插入了一条print语句,这样一来方便了你日后debug需要防止死循环,二来免得你看到光标不动以为死机了,可以追踪进度。没什么事尽量降低爬取速度,不要浪费对方服务器资源。
##Gzip网页解压
一般来讲,到这里我们的网页内的图片就爬取好了,但是不巧,我们刚好碰到一个具有网页压缩技术的网站。是不是发现下载下来的图片是损坏的?那是因为在爬取过程中我们没有对内容进行解压。
Gzip是一种常见的数据压缩工具,通常用来压缩文件,后被引入网页压缩技术当中。很多时候当我们不能从网站上抓到正确的数据时,我们应该检查该网站是否使用了压缩技术,简单的方法有使用站长工具的[Gzip检测](http://tool.chinaz.com/Gzips/)
要解压网站,我们需要在开头导入gzip模块
import gzip
然后将urlopen返回的内容进行解压,再读取就能获得正常的数据
#!python
for each in girls_link:
a = open(str(girl)+'.jpg','wb')
b = urllib.request.Request(each,headers = {"Accept-Encoding": "gzip"})
c = urllib.request.urlopen(b)
d = gzip.GzipFile(fileobj = c)
e = d.read()
a.write(e)
print("No. %d Girl downloaded"%girl)
girl += 1
所以现在它可以称得上是一只爬虫了吗,not yet.
##网页跳转
不会爬的爬虫不能叫爬虫,爬虫具有一定的网页跳转能力。可以自动地移动到新的页面才能进行大规模地数据爬取。对于点进来看这篇文章的你们,显然一页的图片并不能满足你们嘿嘿嘿嘿嘿。。
我们来看首页,首页只展示了一部分图片,并没有预期中的2,3,4..分页页码出现。但是我们看到有个'更多妹子图'可以点击,点击之后,页面跳转到
http://www.youmzi.com/xg/
完全没有头绪,但是事实上第一页的页码**通常被隐藏**,所以我们需要进入下一页,
http://www.youmzi.com/xg/list_10_2.html
再下一页:
http://www.youmzi.com/xg/list_10_3.html
是不是找到了什么规律?我们试着用这个规律来返回到第一页:
http://www.youmzi.com/xg/list_10_1.html
没错,我们成功返回到了第一页,同时验证了第一页的页码**通常被隐藏**的真理。我们找到了规律,就可以按套路在外面加一个循环,首先先把我们前面的url变量从首页改为
url = "http://www.youmzi.com/xg/list_10_%d.html"%page
page就是我们的要爬的页面数字,初始值我们设为1,然后可以使用input来设定上限作为循环条件,这里我们使用while循环会更简单
#!python
pages = int(input("Please enter the pages you want: "))
page = 1
girl = 0
while page <= pages:
url = "http://www.youmzi.com/xg/list_10_%d.html"%page
requests = urllib.request.Request(url,headers =header)
.....
...
要注意的是,刚才在for循环那里设置的girl= 0一定要放在while前面,否则爬取图片的时候,第二页会覆盖第一页的内容。
再用函数包装一下,一个简单的抓妹子图的脚本就出来了
##完整代码
#!python
import urllib.request
import re
import time
import gzip
def youmeizi():
header = {
'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
}
girl = 0
pages = int(input("Please enter the pages you want: "))
girls_basket = []
page = 1
while page <= pages:
url = "http://www.youmzi.com/xg/list_10_%d.html"%page
requests = urllib.request.Request(url,headers =header)
opened = urllib.request.urlopen(requests)
content= opened.read().decode('gbk')
repattern = re.compile(r'<li>.*?<img src="(.*?)".*?</li>',re.S)
girls_link = re.findall(repattern,content)
for each in girls_link:
a = open(str(girl)+'.jpg','wb')
b = urllib.request.Request(each,headers = {"Accept-Encoding": "gzip"})
c = urllib.request.urlopen(b)
d = gzip.GzipFile(fileobj = c)
e = d.read()
a.write(e)
print("No. %d Girl downloaded"%girl)
girl += 1
time.sleep(1)
youmeizi()
最后再次重申一下,在练习爬虫的过程当中。尽量要做一个温柔的人,温柔对待服务器的人:
- 在练习爬虫的的时候,爬个几页十几页成功了就行,如果只是练习,没有必要几百页几百页地爬,造成对方服务器资源浪费。
- 在时间宽松的情况下,尽量添加sleep减少对方服务器压力
- 需要大规模爬的时候,尽量避开高峰期,在晚上服务器压力小的时候爬取可以避免对方服务器高负载。
况且那么多妹子图,
**看得过来嘛**
| PypiClean |
/DRAM_bio-1.4.6-py3-none-any.whl/mag_annotator/summarize_vgfs.py | import re
from os import path, mkdir
from functools import partial
from collections import defaultdict, Counter
from datetime import datetime
import warnings
import logging
import pandas as pd
import altair as alt
from mag_annotator.database_handler import DatabaseHandler
from mag_annotator.utils import setup_logger
from mag_annotator.summarize_genomes import get_ids_from_annotations_by_row, \
get_ids_from_annotations_all, get_ordered_uniques, check_columns
VOGDB_TYPE_NAMES = {'Xr': 'Viral replication genes', 'Xs': 'Viral structure genes',
'Xh': 'Viral genes with host benefits', 'Xp': 'Viral genes with viral benefits',
'Xu': 'Viral genes with unknown function', 'Xx': 'Viral hypothetical genes'}
VIRUS_STATS_COLUMNS = ['VIRSorter category', 'Circular', 'Prophage', 'Gene count', 'Strand switches',
'potential AMG count', 'Transposase present', 'Possible Non-Viral Contig']
VIRAL_DISTILLATE_COLUMNS = ['gene', 'scaffold', 'gene_id', 'gene_description', 'category', 'header',
'subheader', 'module', 'auxiliary_score', 'amg_flags']
VIRAL_LIQUOR_HEADERS = ['Category', 'Function', 'AMG Genes', 'Genes Present', 'Contig Name', 'Present in Contig']
HEATMAP_CELL_HEIGHT = 10
HEATMAP_CELL_WIDTH = 10
defaultdict_list = partial(defaultdict, list)
def add_custom_ms(annotations, distillate_form):
metabolic_genes = set(distillate_form.index)
new_amg_flags = list()
for gene, row in annotations.iterrows():
if 'M' in row['amg_flags']:
new_amg_flags.append(row['amg_flags'])
else:
gene_annotations = set(get_ids_from_annotations_all(pd.DataFrame(row).transpose()).keys())
if len(metabolic_genes & gene_annotations) > 0:
new_amg_flags.append(row['amg_flags'] + 'M')
else:
new_amg_flags.append(row['amg_flags'])
return new_amg_flags
def filter_to_amgs(annotations, max_aux=4, remove_transposons=True, remove_fs=False):
amgs = annotations[((annotations['amg_flags'].str.contains('M')) &
(annotations['amg_flags'].str.contains('V') == False) &
(annotations['amg_flags'].str.contains('A') == False) &
(annotations['amg_flags'].str.contains('P') == False) &
(annotations['auxiliary_score'] <= max_aux)
)]
if remove_transposons:
amgs = amgs[(amgs['amg_flags'].str.contains('T') == False)]
if remove_fs:
amgs = amgs[(amgs['amg_flags'].str.contains('F') == False)]
return amgs
def get_strand_switches(strandedness):
switches = 0
strand = strandedness[0]
for i in range(len(strandedness)):
if strandedness[i] != strand:
switches += 1
strand = strandedness[i]
return switches
def make_viral_stats_table(annotations, potential_amgs, groupby_column='scaffold'):
amg_counts = potential_amgs.groupby(groupby_column).size()
viral_stats_series = list()
for scaffold, frame in annotations.groupby(groupby_column):
# get virus information
virus_categories = re.findall(r'-cat_\d$', scaffold)
if len(virus_categories) > 0:
virus_category = int(virus_categories[0].split('_')[-1]) # viral category
virus_prophage = virus_category in [4, 5] # virus is prophage
else:
virus_category = None
virus_prophage = None
virus_circular = len(re.findall(r'-circular-cat_\d$', scaffold)) == 1 # virus is circular
virus_num_genes = len(frame) # number of genes on viral contig
virus_strand_switches = get_strand_switches(frame.strandedness) # number of strand switches
if scaffold in amg_counts:
virus_number_amgs = amg_counts[scaffold] # number of potential amgs
else:
virus_number_amgs = 0
virus_transposase_present = sum(frame.is_transposon) > 0 # transposase on contig
# virus_j_present = sum(['J' in i if not pd.isna(i) else False for i in frame.amg_flags]) > 0
virus_j_present = sum([i == 'Xh' if not pd.isna(i) else False
for i in frame['vogdb_categories']]) / frame.shape[0]
virus_data = pd.Series([virus_category, virus_circular, virus_prophage, virus_num_genes, virus_strand_switches,
virus_number_amgs, virus_transposase_present, virus_j_present],
index=VIRUS_STATS_COLUMNS, name=scaffold)
# get vogdb categories
# when vogdb has multiple categories only the first is taken
gene_counts = Counter([i.split(';')[0] for i in frame.vogdb_categories.replace('', 'Xx')])
named_gene_counts = {VOGDB_TYPE_NAMES[key]: value for key, value in gene_counts.items()}
gene_counts_series = pd.Series(named_gene_counts, name=scaffold)
viral_stats_series.append(pd.concat([virus_data, gene_counts_series]))
return pd.DataFrame(viral_stats_series).fillna(0)
def make_viral_distillate(potential_amgs, genome_summary_form, amg_database, logger):
"""Make a summary of what in our database makes something a AMG or likly AMG to dram"""
# Transform the amg database to make it more workable
def look_up_metabolic_info(search_db, match_db, match_db_name):
id_genes = set(match_db.index)
return (
(search_db
.assign(gene_id = lambda x: x['ids'].apply(lambda y: y & id_genes))
)[['gene_id', 'scaffold', 'auxiliary_score', 'amg_flags']]
.explode('gene_id')
.dropna(subset=['gene_id'])
.merge(match_db, how='left', left_on='gene_id', right_index=True)
.assign(gene_id_origin=match_db_name))
amg_database_frame = (amg_database
.melt(value_vars=['KO', 'EC', 'PFAM'],
id_vars=['gene', 'module', 'metabolism',
'reference', 'verified'],
value_name='gene_id')
.drop('variable', axis=1)
.assign(
gene_id=lambda x: x['gene_id'].apply(
lambda y: [i.strip() for i in str(y).split(';')]))
.explode('gene_id')
.dropna(subset='gene_id')
.set_index('gene_id')
.rename(columns = {'gene': 'gene_description'})
)
potential_amgs = potential_amgs.assign(ids=get_ids_from_annotations_by_row(potential_amgs))
metabolic_df = look_up_metabolic_info(potential_amgs, genome_summary_form, 'genome_summary_form')
amg_df = look_up_metabolic_info(potential_amgs, amg_database_frame, 'amg_database')
missing = list(set(potential_amgs.index) - (set(metabolic_df.index) | (set(amg_df.index)) ))
# evaluate what is mising
logger.warning(f"No distillate information found for {len(missing)} genes.")
logger.debug('\n'.join(missing))
summary = pd.concat([
metabolic_df,
amg_df,
potential_amgs.loc[missing, ['scaffold', 'auxiliary_score', 'amg_flags']]])
summary.reset_index(inplace=True, drop=False, names='gene')
return summary
def make_vgf_order(amgs):
amg_score_dict = {scaffold: ((1/frame['auxiliary_score']).sum(), len(frame))
for scaffold, frame in amgs.groupby('scaffold')}
amg_scores = pd.DataFrame.from_dict(amg_score_dict, columns=['AMG_score', 'AMG_count'],
orient='index')
return list(amg_scores.sort_values(['AMG_score', 'AMG_count'], ascending=False).index)
def make_amg_count_column(potential_amgs, vgf_order=None):
# build count column
amg_counts = pd.DataFrame(Counter(potential_amgs.scaffold).items(), columns=['Contig Name', 'Number'])
amg_counts['AMG Count'] = 'AMG Count'
text = alt.Chart(amg_counts, width=HEATMAP_CELL_WIDTH+10, height=HEATMAP_CELL_HEIGHT*len(amg_counts)).encode(
x=alt.X('AMG Count', title=None, axis=alt.Axis(labelLimit=0, labelAngle=90)),
y=alt.Y('Contig Name', title=None, axis=alt.Axis(labelLimit=0), sort=vgf_order),
text='Number'
).mark_text()
return text
def make_viral_functional_df(annotations, genome_summary_form, groupby_column='scaffold'):
# build dict of ids per genome
vgf_to_id_dict = defaultdict(defaultdict_list)
for vgf, frame in annotations.groupby(groupby_column, sort=False):
for gene, id_list in get_ids_from_annotations_by_row(frame).items():
for id_ in id_list:
vgf_to_id_dict[vgf][id_].append(gene)
# build long from data frame
rows = list()
for category, category_frame in genome_summary_form.groupby('sheet'):
for header, header_frame in category_frame.groupby('module'):
header_id_set = set(header_frame.index.to_list())
curr_rows = list()
for vgf, id_dict in vgf_to_id_dict.items():
present_in_bin = False
functions_present = list()
amgs_present = list()
for id_, amgs in id_dict.items():
if id_ in header_id_set:
present_in_bin = True
functions_present.append(id_)
amgs_present += amgs
curr_rows.append([category, header, ', '.join(amgs_present), ', '.join(functions_present), vgf,
present_in_bin])
if sum([i[-1] for i in curr_rows]) > 0:
rows += curr_rows
return pd.DataFrame(rows, columns=VIRAL_LIQUOR_HEADERS)
def make_viral_functional_heatmap(functional_df, vgf_order=None):
# build heatmaps
charts = list()
for i, (group, frame) in enumerate(functional_df.groupby('Category', sort=False)):
# set variables for chart
function_order = get_ordered_uniques(list(frame['Function']))
num_vgfs_in_frame = len(set(frame['Contig Name']))
chart_width = HEATMAP_CELL_WIDTH * len(function_order)
chart_height = HEATMAP_CELL_HEIGHT * num_vgfs_in_frame
# set up colors for chart
rect_colors = alt.Color('Present in Contig',
legend=alt.Legend(symbolType='square', values=[True, False]),
sort=[True, False],
scale=alt.Scale(range=['#e5f5f9', '#2ca25f']))
# define chart
# TODO: Figure out how to angle title to take up less space
c = alt.Chart(frame, title=alt.TitleParams(group)).encode(
x=alt.X('Function', title=None, axis=alt.Axis(labelLimit=0, labelAngle=90), sort=function_order),
y=alt.Y('Contig Name', axis=alt.Axis(title=None, labels=False, ticks=False), sort=vgf_order),
tooltip=[alt.Tooltip('Contig Name'),
alt.Tooltip('Category'),
alt.Tooltip('Function'),
alt.Tooltip('AMG Genes'),
alt.Tooltip('Genes Present')]
).mark_rect().encode(color=rect_colors).properties(
width=chart_width,
height=chart_height)
charts.append(c)
# merge and return
function_heatmap = alt.hconcat(*charts, spacing=5)
return function_heatmap
def summarize_vgfs(input_file, output_dir, groupby_column='scaffold', max_auxiliary_score=3,
remove_transposons=False, remove_fs=False, custom_distillate=None,
log_file_path:str=None, config_loc=None):
# make output folder
mkdir(output_dir)
if log_file_path is None:
log_file_path = path.join(output_dir, "distill.log")
logger = logging.getLogger('distillation_log')
setup_logger(logger, log_file_path)
logger.info(f"The log file is created at {log_file_path}")
# set up
annotations = pd.read_csv(input_file, sep='\t', index_col=0).fillna('')
database_handler = DatabaseHandler(logger, config_loc=config_loc)
if database_handler.config["dram_sheets"].get('genome_summary_form') is None:
raise ValueError('Genome summary form location must be set in order to summarize genomes')
genome_summary_form = pd.read_csv(database_handler.config['dram_sheets']['genome_summary_form'], sep='\t', index_col=0)
if custom_distillate is not None:
custom_distillate_form = pd.read_csv(custom_distillate, sep='\t', index_col=0)
genome_summary_form = pd.concat([genome_summary_form, custom_distillate_form])
# add M's from custom distillate
annotations['amg_flags'] = add_custom_ms(annotations, custom_distillate_form)
logger.info('Retrieved database locations and descriptions')
# get potential AMGs
potential_amgs = filter_to_amgs(annotations, max_aux=max_auxiliary_score,
remove_transposons=remove_transposons, remove_fs=remove_fs)
check_columns(potential_amgs, logger)
logger.info('Determined potential amgs')
# make distillate
viral_genome_stats = make_viral_stats_table(annotations, potential_amgs, groupby_column)
viral_genome_stats.to_csv(path.join(output_dir, 'vMAG_stats.tsv'), sep='\t')
logger.info('Calculated viral genome statistics')
viral_distillate = make_viral_distillate(
potential_amgs,
genome_summary_form,
pd.read_csv(database_handler.config["dram_sheets"].get('amg_database'), sep='\t'),
logger)
viral_distillate.to_csv(path.join(output_dir, 'amg_summary.tsv'), sep='\t', index=None)
logger.info('Generated AMG summary')
# make liquor
vgf_order = make_vgf_order(potential_amgs)
amg_column = make_amg_count_column(potential_amgs, vgf_order)
viral_function_df = make_viral_functional_df(potential_amgs, genome_summary_form, groupby_column=groupby_column)
viral_functional_heatmap = make_viral_functional_heatmap(viral_function_df, vgf_order)
product = alt.hconcat(amg_column, viral_functional_heatmap, spacing=5)
product.save(path.join(output_dir, 'product.html'))
logger.info('Generated product heatmap')
logger.info("Completed distillation") | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/utils/StaticLibraries.py | import os
from nuitka.containers.OrderedSets import OrderedSet
from nuitka.PythonFlavors import (
isAnacondaPython,
isDebianPackagePython,
isNuitkaPython,
)
from nuitka.PythonVersions import (
getPythonABI,
getSystemPrefixPath,
python_version,
python_version_str,
)
from nuitka.Tracing import general
from .FileOperations import getFileContentByLine, getFileList
from .Utils import getLinuxDistribution, isDebianBasedLinux, isWin32Windows
_ldconf_paths = None
_static_lib_cache = {}
def locateStaticLinkLibrary(dll_name):
if dll_name not in _static_lib_cache:
_static_lib_cache[dll_name] = _locateStaticLinkLibrary(dll_name)
return _static_lib_cache[dll_name]
def _locateStaticLinkLibrary(dll_name):
# singleton, pylint: disable=global-statement
#
global _ldconf_paths
if _ldconf_paths is None:
_ldconf_paths = OrderedSet()
for conf_filemame in getFileList("/etc/ld.so.conf.d", only_suffixes=".conf"):
for conf_line in getFileContentByLine(conf_filemame):
conf_line = conf_line.split("#", 1)[0]
conf_line = conf_line.strip()
if os.path.exists(conf_line):
_ldconf_paths.add(conf_line)
for ld_config_path in _ldconf_paths:
candidate = os.path.join(ld_config_path, "lib%s.a" % dll_name)
if os.path.exists(candidate):
return candidate
return None
_static_lib_python_path = False
def isDebianSuitableForStaticLinking():
dist_name, _base, dist_version = getLinuxDistribution()
if dist_name == "Debian":
if dist_version is None:
return True
try:
dist_version = tuple(int(x) for x in dist_version.split("."))
except ValueError:
# dist_version contains a non-numeric string such as "sid".
return True
return dist_version >= (10,)
else:
# TODO: Needs implementing potentially, Mint etc. are based
# on something that should be considered.
return True
def _getSystemStaticLibPythonPath():
# Return driven function with many cases, pylint: disable=too-many-branches,too-many-return-statements
sys_prefix = getSystemPrefixPath()
python_abi_version = python_version_str + getPythonABI()
if isNuitkaPython():
# Nuitka Python has this.
if isWin32Windows():
return os.path.join(
sys_prefix,
"libs",
"python" + python_abi_version.replace(".", "") + ".lib",
)
else:
return os.path.join(
sys_prefix,
"lib",
"libpython" + python_abi_version + ".a",
)
if isWin32Windows():
# The gcc used on Windows for Anaconda is far too old for winlibs gcc
# to use its library.
if isAnacondaPython():
return None
candidates = [
# Anaconda has this.
os.path.join(
sys_prefix,
"libs",
"libpython" + python_abi_version.replace(".", "") + ".dll.a",
),
# MSYS2 mingw64 Python has this.
os.path.join(
sys_prefix,
"lib",
"libpython" + python_abi_version + ".dll.a",
),
]
for candidate in candidates:
if os.path.exists(candidate):
return candidate
else:
candidate = os.path.join(
sys_prefix, "lib", "libpython" + python_abi_version + ".a"
)
if os.path.exists(candidate):
return candidate
# For Python2 this works. TODO: Figure out Debian and Python3.
if (
python_version < 0x300
and isDebianPackagePython()
and isDebianSuitableForStaticLinking()
):
candidate = locateStaticLinkLibrary("python" + python_abi_version)
else:
candidate = None
if candidate is not None and os.path.exists(candidate):
# Also check libz, can be missing
if not locateStaticLinkLibrary("z"):
general.warning(
"Error, missing 'libz-dev' installation needed for static lib-python."
)
return candidate
# This is not necessarily only for Python3 on Debian, but maybe others as well,
# but that's what's been tested.
if python_version >= 0x300 and isDebianPackagePython() and isDebianBasedLinux():
try:
import sysconfig
candidate = os.path.join(
sysconfig.get_config_var("LIBPL"),
"libpython" + python_abi_version + "-pic.a",
)
if os.path.exists(candidate):
return candidate
except ImportError:
# Cannot detect this properly for Python 2.6, but we don't care much
# about that anyway.
pass
return None
def getSystemStaticLibPythonPath():
global _static_lib_python_path # singleton, pylint: disable=global-statement
if _static_lib_python_path is False:
_static_lib_python_path = _getSystemStaticLibPythonPath()
return _static_lib_python_path | PypiClean |
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/tl/custom/forward.py | from .chatgetter import ChatGetter
from .sendergetter import SenderGetter
from ... import utils, helpers
class Forward(ChatGetter, SenderGetter):
"""
Custom class that encapsulates a :tl:`MessageFwdHeader` providing an
abstraction to easily access information like the original sender.
Remember that this class implements `ChatGetter
<telethon.tl.custom.chatgetter.ChatGetter>` and `SenderGetter
<telethon.tl.custom.sendergetter.SenderGetter>` which means you
have access to all their sender and chat properties and methods.
Attributes:
original_fwd (:tl:`MessageFwdHeader`):
The original :tl:`MessageFwdHeader` instance.
Any other attribute:
Attributes not described here are the same as those available
in the original :tl:`MessageFwdHeader`.
"""
def __init__(self, client, original, entities):
# Copy all the fields, not reference! It would cause memory cycles:
# self.original_fwd.original_fwd.original_fwd.original_fwd
# ...would be valid if we referenced.
self.__dict__.update(original.__dict__)
self.original_fwd = original
sender_id = sender = input_sender = peer = chat = input_chat = None
if original.from_id:
ty = helpers._entity_type(original.from_id)
if ty == helpers._EntityType.USER:
sender_id = utils.get_peer_id(original.from_id)
sender, input_sender = utils._get_entity_pair(
sender_id, entities, client._entity_cache
)
elif ty in (helpers._EntityType.CHAT, helpers._EntityType.CHANNEL):
peer = original.from_id
chat, input_chat = utils._get_entity_pair(
utils.get_peer_id(peer), entities, client._entity_cache
)
# This call resets the client
ChatGetter.__init__(self, peer, chat=chat, input_chat=input_chat)
SenderGetter.__init__(self, sender_id, sender=sender, input_sender=input_sender)
self._client = client
# TODO We could reload the message | PypiClean |
/7uring-1.0.0.tar.gz/7uring-1.0.0/turing/hashing/hashsha512.py | import hashlib, requests, bs4, re, os, sys
colors = {
'error':'\033[31;1m[x] ',
'success':'\033[36;1m[-] ',
'msg':'\033[33;1m[o] '
}
def stringToSHA512(string):
result = hashlib.sha512(string.encode()) #Create a SHA512 hash object
return result.hexdigest() #Return the required hexadecimal hash
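# Illustrative comment (not part of the original module): the digest is always
# 128 lowercase hex characters, which is exactly what verifySHA512() below checks.
#   stringToSHA512('abc') -> 'ddaf35a193617aba...2a9ac94fa54ca49f'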
def verifySHA512(sha512):
sha512Regex = re.compile(r'^[0-9a-f]{128}$') #Create a regex object
mo = sha512Regex.search(sha512) #Create a match object
if mo == None:
return False
else:
return True
def sha512ToString(sha512):
sha512 = sha512.lower()
if not verifySHA512(sha512):
print(colors['error'] + 'Invalid hash')
sys.exit()
else:
URL='https://md5decrypt.net/en/Sha512/' #Create a url
myobj = {
'hash':sha512,
'captcha13126':'',
'ahah13126':'8239e6d5b8e2f67f34cfbd3c77b05523',
'decrypt':'Decrypt'
}
res = requests.post(url=URL, data=myobj) #Send a POST request
res.raise_for_status()
source = res.content
        soup = bs4.BeautifulSoup(source, 'lxml') #Create a BeautifulSoup object
css_path = 'html body div#corps fieldset#answer b'
elem = soup.select(css_path) #Find the required element
try:
print(colors['msg'] + 'Cracked!\n' + colors['success'] + sha512 + ':' + elem[0].text) #Print the cracked string
except:
print(colors['msg'] + 'Hash not found in databases')
def sha512Brute(sha512, wordlist):
if os.path.exists(wordlist) and os.path.isfile(wordlist): #Check if the wordlist exists
if not os.path.isabs(wordlist): #Check if it is an absolute path
wordlist = os.path.abspath(wordlist)
else:
print(colors['error'] + 'Invalid path')
sys.exit()
if not verifySHA512(sha512): #Verify if hash is correct
print(colors['error'] + 'Invalid hash')
sys.exit()
with open(wordlist, 'r', errors='replace') as w:
words = w.readlines() #Store all words in a list
for word in words:
sha512String = stringToSHA512(word.rstrip())
if sha512String == sha512: #Check if hash matches
print(colors['msg'] + 'Cracked!')
print(colors['success'] + sha512 + ':' + word)
break
    else:
        print(colors['msg'] + 'Not found') #for-else: runs only if no word matched
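# Minimal usage sketch added for illustration (not in the original module). The
# online lookup (sha512ToString) and the wordlist attack (sha512Brute) are left
# commented out because they need network access / a real wordlist file.
if __name__ == '__main__':
    demo_digest = stringToSHA512('password')
    print(colors['msg'] + 'SHA512 of "password": ' + demo_digest)
    print(colors['msg'] + 'Valid hash format: ' + str(verifySHA512(demo_digest)))
    # sha512ToString(demo_digest)                        # queries md5decrypt.net
    # sha512Brute(demo_digest, '/path/to/wordlist.txt')  # hypothetical wordlist path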
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/static/js/codemirror/mode/q/q.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("q",function(config){
var indentUnit=config.indentUnit,
curPunc,
keywords=buildRE(["abs","acos","aj","aj0","all","and","any","asc","asin","asof","atan","attr","avg","avgs","bin","by","ceiling","cols","cor","cos","count","cov","cross","csv","cut","delete","deltas","desc","dev","differ","distinct","div","do","each","ej","enlist","eval","except","exec","exit","exp","fby","fills","first","fkeys","flip","floor","from","get","getenv","group","gtime","hclose","hcount","hdel","hopen","hsym","iasc","idesc","if","ij","in","insert","inter","inv","key","keys","last","like","list","lj","load","log","lower","lsq","ltime","ltrim","mavg","max","maxs","mcount","md5","mdev","med","meta","min","mins","mmax","mmin","mmu","mod","msum","neg","next","not","null","or","over","parse","peach","pj","plist","prd","prds","prev","prior","rand","rank","ratios","raze","read0","read1","reciprocal","reverse","rload","rotate","rsave","rtrim","save","scan","select","set","setenv","show","signum","sin","sqrt","ss","ssr","string","sublist","sum","sums","sv","system","tables","tan","til","trim","txf","type","uj","ungroup","union","update","upper","upsert","value","var","view","views","vs","wavg","where","where","while","within","wj","wj1","wsum","xasc","xbar","xcol","xcols","xdesc","xexp","xgroup","xkey","xlog","xprev","xrank"]),
E=/[|/&^!+:\\\-*%$=~#;@><,?_\'\"\[\(\]\)\s{}]/;
function buildRE(w){return new RegExp("^("+w.join("|")+")$");}
function tokenBase(stream,state){
var sol=stream.sol(),c=stream.next();
curPunc=null;
if(sol)
if(c=="/")
return(state.tokenize=tokenLineComment)(stream,state);
else if(c=="\\"){
if(stream.eol()||/\s/.test(stream.peek()))
return stream.skipToEnd(),/^\\\s*$/.test(stream.current())?(state.tokenize=tokenCommentToEOF)(stream):state.tokenize=tokenBase,"comment";
else
return state.tokenize=tokenBase,"builtin";
}
if(/\s/.test(c))
return stream.peek()=="/"?(stream.skipToEnd(),"comment"):"whitespace";
if(c=='"')
return(state.tokenize=tokenString)(stream,state);
if(c=='`')
return stream.eatWhile(/[A-Za-z\d_:\/.]/),"symbol";
if(("."==c&&/\d/.test(stream.peek()))||/\d/.test(c)){
var t=null;
stream.backUp(1);
if(stream.match(/^\d{4}\.\d{2}(m|\.\d{2}([DT](\d{2}(:\d{2}(:\d{2}(\.\d{1,9})?)?)?)?)?)/)
|| stream.match(/^\d+D(\d{2}(:\d{2}(:\d{2}(\.\d{1,9})?)?)?)/)
|| stream.match(/^\d{2}:\d{2}(:\d{2}(\.\d{1,9})?)?/)
|| stream.match(/^\d+[ptuv]{1}/))
t="temporal";
else if(stream.match(/^0[NwW]{1}/)
|| stream.match(/^0x[\da-fA-F]*/)
|| stream.match(/^[01]+[b]{1}/)
|| stream.match(/^\d+[chijn]{1}/)
|| stream.match(/-?\d*(\.\d*)?(e[+\-]?\d+)?(e|f)?/))
t="number";
return(t&&(!(c=stream.peek())||E.test(c)))?t:(stream.next(),"error");
}
if(/[A-Za-z]|\./.test(c))
return stream.eatWhile(/[A-Za-z._\d]/),keywords.test(stream.current())?"keyword":"variable";
if(/[|/&^!+:\\\-*%$=~#;@><\.,?_\']/.test(c))
return null;
if(/[{}\(\[\]\)]/.test(c))
return null;
return"error";
}
function tokenLineComment(stream,state){
return stream.skipToEnd(),/\/\s*$/.test(stream.current())?(state.tokenize=tokenBlockComment)(stream,state):(state.tokenize=tokenBase),"comment";
}
function tokenBlockComment(stream,state){
var f=stream.sol()&&stream.peek()=="\\";
stream.skipToEnd();
if(f&&/^\\\s*$/.test(stream.current()))
state.tokenize=tokenBase;
return"comment";
}
function tokenCommentToEOF(stream){return stream.skipToEnd(),"comment";}
function tokenString(stream,state){
var escaped=false,next,end=false;
while((next=stream.next())){
if(next=="\""&&!escaped){end=true;break;}
escaped=!escaped&&next=="\\";
}
if(end)state.tokenize=tokenBase;
return"string";
}
function pushContext(state,type,col){state.context={prev:state.context,indent:state.indent,col:col,type:type};}
function popContext(state){state.indent=state.context.indent;state.context=state.context.prev;}
return{
startState:function(){
return{tokenize:tokenBase,
context:null,
indent:0,
col:0};
},
token:function(stream,state){
if(stream.sol()){
if(state.context&&state.context.align==null)
state.context.align=false;
state.indent=stream.indentation();
}
//if (stream.eatSpace()) return null;
var style=state.tokenize(stream,state);
if(style!="comment"&&state.context&&state.context.align==null&&state.context.type!="pattern"){
state.context.align=true;
}
if(curPunc=="(")pushContext(state,")",stream.column());
else if(curPunc=="[")pushContext(state,"]",stream.column());
else if(curPunc=="{")pushContext(state,"}",stream.column());
else if(/[\]\}\)]/.test(curPunc)){
while(state.context&&state.context.type=="pattern")popContext(state);
if(state.context&&curPunc==state.context.type)popContext(state);
}
else if(curPunc=="."&&state.context&&state.context.type=="pattern")popContext(state);
else if(/atom|string|variable/.test(style)&&state.context){
if(/[\}\]]/.test(state.context.type))
pushContext(state,"pattern",stream.column());
else if(state.context.type=="pattern"&&!state.context.align){
state.context.align=true;
state.context.col=stream.column();
}
}
return style;
},
indent:function(state,textAfter){
var firstChar=textAfter&&textAfter.charAt(0);
var context=state.context;
if(/[\]\}]/.test(firstChar))
while (context&&context.type=="pattern")context=context.prev;
var closing=context&&firstChar==context.type;
if(!context)
return 0;
else if(context.type=="pattern")
return context.col;
else if(context.align)
return context.col+(closing?0:1);
else
return context.indent+(closing?0:indentUnit);
}
};
});
CodeMirror.defineMIME("text/x-q","q");
}); | PypiClean |
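// Illustrative usage sketch (comment only, not part of the original mode file):
// once this module is loaded, an editor can be created with the MIME type
// registered by CodeMirror.defineMIME() above, e.g.
//   var editor = CodeMirror.fromTextArea(document.getElementById("code"),
//                                        {mode: "text/x-q", lineNumbers: true});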
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/misopy/sashimi_plot/sashimi_plot.py | import os
import sys
import glob
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
# Use PDF backend
try: matplotlib.use("pdf")
except Exception: pass
from scipy import *
from numpy import *
import pysam
import shelve
import misopy
import misopy.gff_utils as gff_utils
import misopy.pe_utils as pe_utils
from misopy.parse_csv import csv2dictlist_raw
from misopy.samples_utils import load_samples
from misopy.sashimi_plot.Sashimi import Sashimi
from misopy.sashimi_plot.plot_utils.samples_plotter import SamplesPlotter
from misopy.sashimi_plot.plot_utils.plotting import *
from misopy.sashimi_plot.plot_utils.plot_gene import plot_density_from_file
import matplotlib.pyplot as plt
from matplotlib import rc
def plot_bf_dist(bf_filename, settings_filename, output_dir,
max_bf=1e12):
"""
Plot a Bayes factor distribution from a .miso_bf file.
"""
if not bf_filename.endswith(".miso_bf"):
print "WARNING: %s does not end in .miso_bf, are you sure it is the " \
"output of a MISO samples comparison?" %(bf_filename)
# Load BF data
data, h = csv2dictlist_raw(bf_filename)
plot_name = os.path.basename(bf_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
# Setup the figure
sashimi_obj.setup_figure()
# Matrix of bayes factors and delta psi pairs
bfs_and_deltas = []
for event in data:
bf = event['bayes_factor']
delta_psi = event['diff']
if type(bf) == str and "," in bf:
print "WARNING: %s is a multi-isoform event, skipping..." \
%(event)
continue
else:
# Impose upper limit on Bayes factor
            bf = min(max_bf, float(bf))
delta_psi = float(delta_psi)
bfs_and_deltas.append([bf, delta_psi])
bfs_and_deltas = array(bfs_and_deltas)
num_events = len(bfs_and_deltas)
print "Loaded %d event comparisons." %(num_events)
output_filename = sashimi_obj.output_filename
print "Plotting Bayes factors distribution"
print " - Output filename: %s" %(output_filename)
bf_thresholds = settings["bf_thresholds"]
bar_color = settings["bar_color"]
min_bf_thresh = min(bf_thresholds)
num_events_used = sum(bfs_and_deltas[:, 0] >= min_bf_thresh)
for thresh in bf_thresholds:
if type(thresh) != int:
print "Error: BF thresholds must be integers."
#sys.exit(1)
print "Using BF thresholds: "
print bf_thresholds
print "Using bar color: %s" %(bar_color)
plot_cumulative_bars(bfs_and_deltas[:, 0],
bf_thresholds,
bar_color=bar_color,
logged=True)
plt.xticks(bf_thresholds)
c = 1
plt.xlim([bf_thresholds[0] - c, bf_thresholds[-1] + c])
plt.title("Bayes factor distributions\n(using %d/%d events)" \
%(num_events_used, num_events))
plt.xlabel("Bayes factor thresh.")
plt.ylabel("No. events")
sashimi_obj.save_plot()
def plot_event(event_name, pickle_dir, settings_filename,
output_dir,
no_posteriors=False,
plot_title=None,
plot_label=None):
"""
Visualize read densities across the exons and junctions
of a given MISO alternative RNA processing event.
Also plots MISO estimates and Psi values.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
if not os.path.isdir(pickle_dir):
print "Error: event pickle directory %s not found." %(pickle_dir)
#sys.exit(1)
# Retrieve the full pickle filename
genes_filename = os.path.join(pickle_dir,
"genes_to_filenames.shelve")
# Check that file basename exists
if len(glob.glob("%s*" %(genes_filename))) == 0:
raise Exception, "Cannot find file %s. Are you sure the events " \
"were indexed with the latest version of index_gff.py?" \
%(genes_filename)
event_to_filenames = shelve.open(genes_filename)
if event_name not in event_to_filenames:
raise Exception, "Event %s not found in pickled directory %s. " \
"Are you sure this is the right directory for the event?" \
%(event_name, pickle_dir)
pickle_filename = event_to_filenames[event_name]
if pickle_dir not in pickle_filename:
import string
pickle_filename = string.replace(pickle_filename,'\\','/')
if 'sashimi_index' in pickle_filename:
pickle_filename = pickle_dir + string.split(pickle_filename,'sashimi_index')[1]
else:
pickle_filename = pickle_dir + string.split(pickle_filename,'trial_index')[1]
import string
#pickle_filename = string.replace(pickle_filename,' 1','')
if no_posteriors:
print "Asked to not plot MISO posteriors."
plot_density_from_file(settings_filename, pickle_filename, event_name,
output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
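# Illustrative call (comment only, added for clarity; the event ID and paths are
# hypothetical). plot_event() expects the ID= value from the MISO GFF3 annotation,
# the directory produced by index_gff.py, a sashimi_plot settings file and an
# output directory:
#   plot_event("chr1:100:200:+@chr1:300:400:+", "/path/to/sashimi_index",
#              "/path/to/sashimi_plot_settings.txt", "/path/to/output_dir")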
def plot_insert_len(insert_len_filename,
settings_filename,
output_dir):
"""
Plot insert length distribution.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
plot_name = os.path.basename(insert_len_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
num_bins = settings["insert_len_bins"]
output_filename = sashimi_obj.output_filename
sashimi_obj.setup_figure()
s = plt.subplot(1, 1, 1)
print "Plotting insert length distribution..."
print " - Distribution file: %s" %(insert_len_filename)
print " - Output plot: %s" %(output_filename)
insert_dist, params = pe_utils.load_insert_len(insert_len_filename)
mean, sdev, dispersion, num_pairs \
= pe_utils.compute_insert_len_stats(insert_dist)
print "min insert: %.1f" %(min(insert_dist))
print "max insert: %.1f" %(max(insert_dist))
plt.title("%s (%d read-pairs)" \
%(plot_name,
num_pairs),
fontsize=10)
plt.hist(insert_dist, bins=num_bins, color='k',
edgecolor="#ffffff", align='mid')
axes_square(s)
ymin, ymax = s.get_ylim()
plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f" \
%(round(mean, 2),
round(sdev, 2),
round(dispersion, 2)),
horizontalalignment='left',
verticalalignment='top',
bbox=dict(edgecolor='k', facecolor="#ffffff",
alpha=0.5),
fontsize=10,
transform=s.transAxes)
plt.xlabel("Insert length (nt)")
plt.ylabel("No. read pairs")
sashimi_obj.save_plot()
def greeting():
print "Sashimi plot: Visualize spliced RNA-Seq reads along gene models. " \
"Part of the MISO (Mixture of Isoforms model) framework."
print "See --help for usage.\n"
print "Manual available at: http://genes.mit.edu/burgelab/miso/docs/sashimi.html\n"
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--plot-insert-len", dest="plot_insert_len", nargs=2, default=None,
help="Plot the insert length distribution from a given insert length (*.insert_len) "
"filename. Second argument is a settings file name.")
parser.add_option("--plot-bf-dist", dest="plot_bf_dist", nargs=2, default=None,
help="Plot Bayes factor distributon. Takes the arguments: "
"(1) Bayes factor filename (*.miso_bf) filename, "
"(2) a settings filename.")
parser.add_option("--plot-event", dest="plot_event", nargs=3, default=None,
help="Plot read densities and MISO inferences for a given alternative event. "
"Takes the arguments: (1) event name (i.e. the ID= of the event based on MISO gff3 "
"annotation file, (2) directory where indexed GFF annotation is (output of "
"index_gff.py), (3) path to plotting settings file.")
parser.add_option("--no-posteriors", dest="no_posteriors", default=False, action="store_true",
help="If given this argument, MISO posterior estimates are not plotted.")
parser.add_option("--plot-title", dest="plot_title", default=None, nargs=1,
help="Title of plot: a string that will be displayed at top of plot. Example: " \
"--plot-title \"My favorite gene\".")
parser.add_option("--plot-label", dest="plot_label", default=None, nargs=1,
help="Plot label. If given, plot will be saved in the output directory as " \
"the plot label ending in the relevant extension, e.g. <plot_label>.pdf. " \
"Example: --plot-label my_gene")
parser.add_option("--output-dir", dest="output_dir", nargs=1, default=None,
help="Output directory.")
(options, args) = parser.parse_args()
if options.plot_event is None:
greeting()
#sys.exit(1)
if options.output_dir == None:
print "Error: need --output-dir"
#sys.exit(1)
output_dir = os.path.abspath(os.path.expanduser(options.output_dir))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
no_posteriors = options.no_posteriors
plot_title = options.plot_title
plot_label = options.plot_label
if options.plot_insert_len != None:
insert_len_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[1]))
plot_insert_len(insert_len_filename, settings_filename, output_dir)
if options.plot_bf_dist != None:
bf_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[1]))
plot_bf_dist(bf_filename, settings_filename, output_dir)
if options.plot_event != None:
event_name = options.plot_event[0]
pickle_dir = os.path.abspath(os.path.expanduser(options.plot_event[1]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_event[2]))
plot_event(event_name, pickle_dir, settings_filename, output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
if __name__ == '__main__':
main() | PypiClean |
/125softNLP-0.0.1-py3-none-any.whl/pysoftNLP/ner/kashgari/tasks/classification/models.py |
# author: BrikerMan
# contact: [email protected]
# blog: https://eliyar.biz
# file: models.py
# time: 2019-05-22 11:26
import logging
import tensorflow as tf
from typing import Dict, Any
from kashgari.layers import L, AttentionWeightedAverageLayer, KMaxPoolingLayer
from kashgari.tasks.classification.base_model import BaseClassificationModel
class BiLSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_lstm': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_lstm = L.Bidirectional(L.LSTM(**config['layer_bi_lstm']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_lstm(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'layer_bi_gru': {
'units': 128,
'return_sequences': False
},
'layer_dense': {
'activation': 'softmax'
}
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_bi_gru = L.Bidirectional(L.GRU(**config['layer_bi_gru']))
layer_dense = L.Dense(output_dim, **config['layer_dense'])
tensor = layer_bi_gru(embed_model.output)
output_tensor = layer_dense(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, output_tensor)
class CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv1d_layer': {
'filters': 128,
'kernel_size': 5,
'activation': 'relu'
},
'max_pool_layer': {},
'dense_layer': {
'units': 64,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
# build model structure in sequent way
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv1d_layer']))
layers_seq.append(L.GlobalMaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.Dense(**config['dense_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
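# --- Illustrative sketch, not part of the original module. ---
# Every model above unpacks its hyper-parameter dict straight into the Keras
# layers, so a variant only needs to override get_default_hyper_parameters().
# The subclass name and the widened filter/unit counts below are arbitrary
# choices made for this example.
class WideCNN_Model(CNN_Model):
    @classmethod
    def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
        hyper = super(WideCNN_Model, cls).get_default_hyper_parameters()
        hyper['conv1d_layer']['filters'] = 256  # wider convolution
        hyper['dense_layer']['units'] = 128     # larger hidden dense layer
        return hyper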
class CNN_LSTM_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'lstm_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.LSTM(**config['lstm_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class CNN_GRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'conv_layer': {
'filters': 32,
'kernel_size': 3,
'padding': 'same',
'activation': 'relu'
},
'max_pool_layer': {
'pool_size': 2
},
'gru_layer': {
'units': 100
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_seq = []
layers_seq.append(L.Conv1D(**config['conv_layer']))
layers_seq.append(L.MaxPooling1D(**config['max_pool_layer']))
layers_seq.append(L.GRU(**config['gru_layer']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class AVCNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'conv_0': {
'filters': 300,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 300,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 300,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 300,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
# ---
'attn_0': {},
'avg_0': {},
'maxpool_0': {},
# ---
'maxpool_1': {},
'attn_1': {},
'avg_1': {},
# ---
'maxpool_2': {},
'attn_2': {},
'avg_2': {},
# ---
'maxpool_3': {},
'attn_3': {},
'avg_3': {},
# ---
'v_col3': {
# 'mode': 'concat',
'axis': 1
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.7
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_view = L.Concatenate(**config['v_col3'])
layer_allviews = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_matrix_sensor = []
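        # Each of the four conv outputs is reduced by three parallel "sensors"
        # (global max-pool, attention-weighted average, global average-pool);
        # matching sensor outputs are concatenated across kernel sizes into
        # views, and the views are concatenated into a single feature vector.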
for tensor_conv in tensors_conv:
            tensor_sensors = [layer_sensor(tensor_conv) for layer_sensor in layers_sensor]
# tensor_sensors.append(L.GlobalMaxPooling1D()(tensor_conv))
# tensor_sensors.append(AttentionWeightedAverageLayer()(tensor_conv))
# tensor_sensors.append(L.GlobalAveragePooling1D()(tensor_conv))
tensors_matrix_sensor.append(tensor_sensors)
tensors_views = [layer_view(list(tensors)) for tensors in zip(*tensors_matrix_sensor)]
tensor = layer_allviews(tensors_views)
# tensors_v_cols = [L.concatenate(tensors, **config['v_col3']) for tensors
# in zip(*tensors_matrix_sensor)]
# tensor = L.concatenate(tensors_v_cols, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class KMax_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'conv_0': {
'filters': 180,
'kernel_size': 1,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_1': {
'filters': 180,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_2': {
'filters': 180,
'kernel_size': 3,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'conv_3': {
'filters': 180,
'kernel_size': 4,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu'
},
'maxpool_i4': {
'k': 3
},
'merged_tensor': {
# 'mode': 'concat',
'axis': 1
},
'dropout': {
'rate': 0.6
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layer_embed_dropout = L.SpatialDropout1D(**config['spatial_dropout'])
layers_conv = [L.Conv1D(**config[f'conv_{i}']) for i in range(4)]
layers_sensor = [KMaxPoolingLayer(**config['maxpool_i4']),
L.Flatten()]
layer_concat = L.Concatenate(**config['merged_tensor'])
layers_seq = []
layers_seq.append(L.Dropout(**config['dropout']))
layers_seq.append(L.Dense(**config['dense']))
layers_seq.append(L.Dense(output_dim, **config['activation_layer']))
embed_tensor = layer_embed_dropout(embed_model.output)
tensors_conv = [layer_conv(embed_tensor) for layer_conv in layers_conv]
tensors_sensor = []
for tensor_conv in tensors_conv:
tensor_sensor = tensor_conv
for layer_sensor in layers_sensor:
tensor_sensor = layer_sensor(tensor_sensor)
tensors_sensor.append(tensor_sensor)
tensor = layer_concat(tensors_sensor)
# tensor = L.concatenate(tensors_sensor, **config['merged_tensor'])
for layer in layers_seq:
tensor = layer(tensor)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor)
class R_CNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.2
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'conv_0': {
'filters': 128,
'kernel_size': 2,
'kernel_initializer': 'normal',
'padding': 'valid',
'activation': 'relu',
'strides': 1
},
'maxpool': {},
'attn': {},
'average': {},
'concat': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 120,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rcnn_seq = []
layers_rcnn_seq.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rcnn_seq.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rcnn_seq.append(L.Conv1D(**config['conv_0']))
layers_sensor = []
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_concat = L.Concatenate(**config['concat'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor = embed_model.output
for layer in layers_rcnn_seq:
tensor = layer(tensor)
tensors_sensor = [layer(tensor) for layer in layers_sensor]
tensor_output = layer_concat(tensors_sensor)
# tensor_output = L.concatenate(tensor_sensors, **config['concat'])
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 60,
'return_sequences': True
},
'rnn_1': {
'units': 60,
'return_sequences': True
},
'concat_rnn': {
'axis': 2
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 144,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn0 = []
layers_rnn0.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn0.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layer_bi_rnn1 = L.Bidirectional(L.GRU(**config['rnn_1']))
layer_concat = L.Concatenate(**config['concat_rnn'])
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn0:
tensor_rnn = layer(tensor_rnn)
tensor_concat = layer_concat([tensor_rnn, layer_bi_rnn1(tensor_rnn)])
tensor_sensors = [layer(tensor_concat) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_BiGRU_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.15
},
'rnn_0': {
'units': 64,
'return_sequences': True
},
'dropout_rnn': {
'rate': 0.35
},
'rnn_1': {
'units': 64,
'return_sequences': True
},
'last': {},
'maxpool': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout': {
'rate': 0.5
},
'dense': {
'units': 72,
'activation': 'relu'
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.Dropout(**config['dropout_rnn']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
class Dropout_AVRNN_Model(BaseClassificationModel):
@classmethod
def get_default_hyper_parameters(cls) -> Dict[str, Dict[str, Any]]:
return {
'spatial_dropout': {
'rate': 0.25
},
'rnn_0': {
'units': 56,
'return_sequences': True
},
'rnn_dropout': {
'rate': 0.3
},
'rnn_1': {
'units': 56,
'return_sequences': True
},
'last': {},
'maxpool': {},
'attn': {},
'average': {},
'all_views': {
'axis': 1
},
'dropout_0': {
'rate': 0.5
},
'dense': {
'units': 128,
'activation': 'relu'
},
'dropout_1': {
'rate': 0.25
},
'activation_layer': {
'activation': 'softmax'
},
}
def build_model_arc(self):
output_dim = len(self.processor.label2idx)
config = self.hyper_parameters
embed_model = self.embedding.embed_model
layers_rnn = []
layers_rnn.append(L.SpatialDropout1D(**config['spatial_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_0'])))
layers_rnn.append(L.SpatialDropout1D(**config['rnn_dropout']))
layers_rnn.append(L.Bidirectional(L.GRU(**config['rnn_1'])))
layers_sensor = []
layers_sensor.append(L.Lambda(lambda t: t[:, -1], name='last'))
layers_sensor.append(L.GlobalMaxPooling1D())
layers_sensor.append(AttentionWeightedAverageLayer())
layers_sensor.append(L.GlobalAveragePooling1D())
layer_allviews = L.Concatenate(**config['all_views'])
layers_full_connect = []
layers_full_connect.append(L.Dropout(**config['dropout_0']))
layers_full_connect.append(L.Dense(**config['dense']))
layers_full_connect.append(L.Dropout(**config['dropout_1']))
layers_full_connect.append(L.Dense(output_dim, **config['activation_layer']))
tensor_rnn = embed_model.output
for layer in layers_rnn:
tensor_rnn = layer(tensor_rnn)
tensor_sensors = [layer(tensor_rnn) for layer in layers_sensor]
tensor_output = layer_allviews(tensor_sensors)
for layer in layers_full_connect:
tensor_output = layer(tensor_output)
self.tf_model = tf.keras.Model(embed_model.inputs, tensor_output)
if __name__ == "__main__":
print(BiLSTM_Model.get_default_hyper_parameters())
logging.basicConfig(level=logging.DEBUG)
from kashgari.corpus import SMP2018ECDTCorpus
x, y = SMP2018ECDTCorpus.load_data()
import kashgari
from kashgari.processors.classification_processor import ClassificationProcessor
from kashgari.embeddings import BareEmbedding
processor = ClassificationProcessor(multi_label=False)
embed = BareEmbedding(task=kashgari.CLASSIFICATION, sequence_length=30, processor=processor)
m = BiLSTM_Model(embed)
# m.build_model(x, y)
m.fit(x, y, epochs=2)
print(m.predict(x[:10]))
# m.evaluate(x, y)
print(m.predict_top_k_class(x[:10])) | PypiClean |
/FLAML-2.0.2-py3-none-any.whl/flaml/tune/searcher/suggestion.py |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This source file is adapted here because ray does not fully support Windows.
# Copyright (c) Microsoft Corporation.
import time
import functools
import warnings
import copy
import numpy as np
import logging
from typing import Any, Dict, Optional, Union, List, Tuple, Callable
import pickle
from .variant_generator import parse_spec_vars
from ..sample import (
Categorical,
Domain,
Float,
Integer,
LogUniform,
Quantized,
Uniform,
)
from ..trial import flatten_dict, unflatten_dict
from collections import defaultdict
logger = logging.getLogger(__name__)
UNRESOLVED_SEARCH_SPACE = str(
"You passed a `{par}` parameter to {cls} that contained unresolved search "
"space definitions. {cls} should however be instantiated with fully "
"configured search spaces only. To use Ray Tune's automatic search space "
"conversion, pass the space definition as part of the `config` argument "
"to `tune.run()` instead."
)
UNDEFINED_SEARCH_SPACE = str(
"Trying to sample a configuration from {cls}, but no search "
"space has been defined. Either pass the `{space}` argument when "
"instantiating the search algorithm, or pass a `config` to "
"`tune.run()`."
)
UNDEFINED_METRIC_MODE = str(
"Trying to sample a configuration from {cls}, but the `metric` "
"({metric}) or `mode` ({mode}) parameters have not been set. "
"Either pass these arguments when instantiating the search algorithm, "
"or pass them to `tune.run()`."
)
class Searcher:
"""Abstract class for wrapping suggesting algorithms.
Custom algorithms can extend this class easily by overriding the
    `suggest` method to provide generated parameters for the trials.
Any subclass that implements ``__init__`` must also call the
constructor of this class: ``super(Subclass, self).__init__(...)``.
To track suggestions and their corresponding evaluations, the method
`suggest` will be passed a trial_id, which will be used in
subsequent notifications.
Not all implementations support multi objectives.
Args:
        metric (str or list): The training result objective value attribute. If
            a list, then a list of training result objective value attributes.
        mode (str or list): If a string, one of {min, max}. If a list, then a
            list of "min"/"max" entries determining whether each objective is
            minimizing or maximizing its metric attribute. Must match the type of metric.
```python
class ExampleSearch(Searcher):
def __init__(self, metric="mean_loss", mode="min", **kwargs):
super(ExampleSearch, self).__init__(
metric=metric, mode=mode, **kwargs)
self.optimizer = Optimizer()
self.configurations = {}
def suggest(self, trial_id):
configuration = self.optimizer.query()
            self.configurations[trial_id] = configuration
            return configuration
def on_trial_complete(self, trial_id, result, **kwargs):
configuration = self.configurations[trial_id]
if result and self.metric in result:
self.optimizer.update(configuration, result[self.metric])
tune.run(trainable_function, search_alg=ExampleSearch())
```
"""
FINISHED = "FINISHED"
CKPT_FILE_TMPL = "searcher-state-{}.pkl"
def __init__(
self,
metric: Optional[str] = None,
mode: Optional[str] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
self._metric = metric
self._mode = mode
if not mode or not metric:
# Early return to avoid assertions
return
assert isinstance(metric, type(mode)), "metric and mode must be of the same type"
if isinstance(mode, str):
assert mode in ["min", "max"], "if `mode` is a str must be 'min' or 'max'!"
elif isinstance(mode, list):
assert len(mode) == len(metric), "Metric and mode must be the same length"
assert all(mod in ["min", "max", "obs"] for mod in mode), "All of mode must be 'min' or 'max' or 'obs'!"
else:
raise ValueError("Mode must either be a list or string")
def set_search_properties(self, metric: Optional[str], mode: Optional[str], config: Dict) -> bool:
"""Pass search properties to searcher.
This method acts as an alternative to instantiating search algorithms
with their own specific search spaces. Instead they can accept a
Tune config through this method. A searcher should return ``True``
if setting the config was successful, or ``False`` if it was
unsuccessful, e.g. when the search space has already been set.
Args:
metric (str): Metric to optimize
mode (str): One of ["min", "max"]. Direction to optimize.
config (dict): Tune config dict.
"""
return False
def on_trial_result(self, trial_id: str, result: Dict):
"""Optional notification for result during training.
Note that by default, the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process.
Args:
trial_id (str): A unique string ID for the trial.
result (dict): Dictionary of metrics for current training progress.
Note that the result dict may include NaNs or
may not include the optimization metric. It is up to the
subclass implementation to preprocess the result to
avoid breaking the optimization process.
"""
pass
@property
def metric(self) -> str:
"""The training result objective value attribute."""
return self._metric
@property
def mode(self) -> str:
"""Specifies if minimizing or maximizing the metric."""
return self._mode
class ConcurrencyLimiter(Searcher):
"""A wrapper algorithm for limiting the number of concurrent trials.
Args:
searcher (Searcher): Searcher object that the
ConcurrencyLimiter will manage.
max_concurrent (int): Maximum concurrent samples from the underlying
searcher.
batch (bool): Whether to wait for all concurrent samples
to finish before updating the underlying searcher.
Example:
```python
from ray.tune.suggest import ConcurrencyLimiter # ray version < 2
search_alg = HyperOptSearch(metric="accuracy")
search_alg = ConcurrencyLimiter(search_alg, max_concurrent=2)
tune.run(trainable, search_alg=search_alg)
```
"""
def __init__(self, searcher: Searcher, max_concurrent: int, batch: bool = False):
assert type(max_concurrent) is int and max_concurrent > 0
self.searcher = searcher
self.max_concurrent = max_concurrent
self.batch = batch
self.live_trials = set()
self.cached_results = {}
super(ConcurrencyLimiter, self).__init__(metric=self.searcher.metric, mode=self.searcher.mode)
def suggest(self, trial_id: str) -> Optional[Dict]:
assert trial_id not in self.live_trials, f"Trial ID {trial_id} must be unique: already found in set."
if len(self.live_trials) >= self.max_concurrent:
logger.debug(
f"Not providing a suggestion for {trial_id} due to " "concurrency limit: %s/%s.",
len(self.live_trials),
self.max_concurrent,
)
return
suggestion = self.searcher.suggest(trial_id)
if suggestion not in (None, Searcher.FINISHED):
self.live_trials.add(trial_id)
return suggestion
def on_trial_complete(self, trial_id: str, result: Optional[Dict] = None, error: bool = False):
if trial_id not in self.live_trials:
return
elif self.batch:
self.cached_results[trial_id] = (result, error)
if len(self.cached_results) == self.max_concurrent:
# Update the underlying searcher once the
# full batch is completed.
for trial_id, (result, error) in self.cached_results.items():
self.searcher.on_trial_complete(trial_id, result=result, error=error)
self.live_trials.remove(trial_id)
self.cached_results = {}
else:
return
else:
self.searcher.on_trial_complete(trial_id, result=result, error=error)
self.live_trials.remove(trial_id)
def get_state(self) -> Dict:
state = self.__dict__.copy()
del state["searcher"]
return copy.deepcopy(state)
def set_state(self, state: Dict):
self.__dict__.update(state)
def save(self, checkpoint_path: str):
self.searcher.save(checkpoint_path)
def restore(self, checkpoint_path: str):
self.searcher.restore(checkpoint_path)
def on_pause(self, trial_id: str):
self.searcher.on_pause(trial_id)
def on_unpause(self, trial_id: str):
self.searcher.on_unpause(trial_id)
def set_search_properties(self, metric: Optional[str], mode: Optional[str], config: Dict) -> bool:
return self.searcher.set_search_properties(metric, mode, config)
try:
import optuna as ot
from optuna.distributions import BaseDistribution as OptunaDistribution
from optuna.samplers import BaseSampler
from optuna.trial import TrialState as OptunaTrialState
from optuna.trial import Trial as OptunaTrial
except ImportError:
ot = None
OptunaDistribution = None
BaseSampler = None
OptunaTrialState = None
OptunaTrial = None
DEFAULT_METRIC = "_metric"
TRAINING_ITERATION = "training_iteration"
DEFINE_BY_RUN_WARN_THRESHOLD_S = 1
def validate_warmstart(
parameter_names: List[str],
points_to_evaluate: List[Union[List, Dict]],
evaluated_rewards: List,
validate_point_name_lengths: bool = True,
):
"""Generic validation of a Searcher's warm start functionality.
Raises exceptions in case of type and length mismatches between
parameters.
If ``validate_point_name_lengths`` is False, the equality of lengths
between ``points_to_evaluate`` and ``parameter_names`` will not be
validated.
"""
if points_to_evaluate:
if not isinstance(points_to_evaluate, list):
raise TypeError("points_to_evaluate expected to be a list, got {}.".format(type(points_to_evaluate)))
for point in points_to_evaluate:
if not isinstance(point, (dict, list)):
raise TypeError(f"points_to_evaluate expected to include list or dict, " f"got {point}.")
if validate_point_name_lengths and (not len(point) == len(parameter_names)):
raise ValueError(
"Dim of point {}".format(point)
+ " and parameter_names {}".format(parameter_names)
+ " do not match."
)
if points_to_evaluate and evaluated_rewards:
if not isinstance(evaluated_rewards, list):
raise TypeError("evaluated_rewards expected to be a list, got {}.".format(type(evaluated_rewards)))
if not len(evaluated_rewards) == len(points_to_evaluate):
raise ValueError(
"Dim of evaluated_rewards {}".format(evaluated_rewards)
+ " and points_to_evaluate {}".format(points_to_evaluate)
+ " do not match."
)
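def _validate_warmstart_example() -> None:
    """Illustrative only, not part of the original module: inputs that pass the
    validation above -- two configurations over the same two parameter names,
    with exactly one reward per configuration. Names and numbers are made up."""
    validate_warmstart(
        parameter_names=["lr", "batch_size"],
        points_to_evaluate=[{"lr": 1e-3, "batch_size": 32},
                            {"lr": 1e-2, "batch_size": 64}],
        evaluated_rewards=[0.81, 0.77],
    )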
class _OptunaTrialSuggestCaptor:
"""Utility to capture returned values from Optuna's suggest_ methods.
    This will wrap around the ``optuna.Trial`` object and decorate all
`suggest_` callables with a function capturing the returned value,
which will be saved in the ``captured_values`` dict.
"""
def __init__(self, ot_trial: OptunaTrial) -> None:
self.ot_trial = ot_trial
self.captured_values: Dict[str, Any] = {}
def _get_wrapper(self, func: Callable) -> Callable:
@functools.wraps(func)
def wrapper(*args, **kwargs):
# name is always the first arg for suggest_ methods
name = kwargs.get("name", args[0])
ret = func(*args, **kwargs)
self.captured_values[name] = ret
return ret
return wrapper
def __getattr__(self, item_name: str) -> Any:
item = getattr(self.ot_trial, item_name)
if item_name.startswith("suggest_") and callable(item):
return self._get_wrapper(item)
return item
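def _captor_example() -> Dict[str, Any]:
    """Illustrative sketch only, not part of the original module; requires
    optuna to be installed. The captor is transparent for attribute access but
    records the value returned by every ``suggest_*`` call, which is how the
    define-by-run code path in ``OptunaSearch`` below recovers the sampled
    configuration."""
    study = ot.create_study()
    captor = _OptunaTrialSuggestCaptor(study.ask())
    captor.suggest_float("lr", 1e-4, 1e-1, log=True)
    return captor.captured_values  # e.g. {"lr": 0.0123}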
class OptunaSearch(Searcher):
"""A wrapper around Optuna to provide trial suggestions.
`Optuna <https://optuna.org/>`_ is a hyperparameter optimization library.
In contrast to other libraries, it employs define-by-run style
hyperparameter definitions.
This Searcher is a thin wrapper around Optuna's search algorithms.
You can pass any Optuna sampler, which will be used to generate
hyperparameter suggestions.
Multi-objective optimization is supported.
Args:
space: Hyperparameter search space definition for
Optuna's sampler. This can be either a dict with
parameter names as keys and ``optuna.distributions`` as values,
or a Callable - in which case, it should be a define-by-run
function using ``optuna.trial`` to obtain the hyperparameter
values. The function should return either a dict of
constant values with names as keys, or None.
For more information, see https://optuna.readthedocs.io\
/en/stable/tutorial/10_key_features/002_configurations.html.
Warning - No actual computation should take place in the define-by-run
function. Instead, put the training logic inside the function
or class trainable passed to ``tune.run``.
metric: The training result objective value attribute. If
None but a mode was passed, the anonymous metric ``_metric``
will be used per default. Can be a list of metrics for
multi-objective optimization.
mode: One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute. Can be a list of
modes for multi-objective optimization (corresponding to
``metric``).
points_to_evaluate: Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
sampler: Optuna sampler used to
draw hyperparameter configurations. Defaults to ``MOTPESampler``
for multi-objective optimization with Optuna<2.9.0, and
``TPESampler`` in every other case.
Warning: Please note that with Optuna 2.10.0 and earlier
default ``MOTPESampler``/``TPESampler`` suffer
from performance issues when dealing with a large number of
completed trials (approx. >100). This will manifest as
a delay when suggesting new configurations.
This is an Optuna issue and may be fixed in a future
Optuna release.
seed: Seed to initialize sampler with. This parameter is only
used when ``sampler=None``. In all other cases, the sampler
you pass should be initialized with the seed already.
evaluated_rewards: If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate.
Warning - When using ``evaluated_rewards``, the search space ``space``
must be provided as a dict with parameter names as
keys and ``optuna.distributions`` instances as values. The
define-by-run search space definition is not yet supported with
this functionality.
Tune automatically converts search spaces to Optuna's format:
```python
from ray.tune.suggest.optuna import OptunaSearch
config = {
"a": tune.uniform(6, 8)
"b": tune.loguniform(1e-4, 1e-2)
}
optuna_search = OptunaSearch(
metric="loss",
mode="min")
tune.run(trainable, config=config, search_alg=optuna_search)
```
If you would like to pass the search space manually, the code would
look like this:
```python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
# Equivalent Optuna define-by-run function approach:
def define_search_space(trial: optuna.Trial):
trial.suggest_float("a", 6, 8)
trial.suggest_float("b", 1e-4, 1e-2, log=True)
# training logic goes into trainable, this is just
# for search space definition
optuna_search = OptunaSearch(
define_search_space,
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
```
Multi-objective optimization is supported:
```python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
# Note you have to specify metric and mode here instead of
# in tune.run
optuna_search = OptunaSearch(
space,
metric=["loss1", "loss2"],
mode=["min", "max"])
# Do not specify metric and mode here!
tune.run(
trainable,
search_alg=optuna_search
)
```
You can pass configs that will be evaluated first using
``points_to_evaluate``:
```python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
points_to_evaluate=[{"a": 6.5, "b": 5e-4}, {"a": 7.5, "b": 1e-3}]
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
```
Avoid re-running evaluated trials by passing the rewards together with
`points_to_evaluate`:
```python
from ray.tune.suggest.optuna import OptunaSearch
import optuna
space = {
"a": optuna.distributions.UniformDistribution(6, 8),
"b": optuna.distributions.LogUniformDistribution(1e-4, 1e-2),
}
optuna_search = OptunaSearch(
space,
points_to_evaluate=[{"a": 6.5, "b": 5e-4}, {"a": 7.5, "b": 1e-3}]
evaluated_rewards=[0.89, 0.42]
metric="loss",
mode="min")
tune.run(trainable, search_alg=optuna_search)
```
"""
def __init__(
self,
space: Optional[
Union[
Dict[str, "OptunaDistribution"],
List[Tuple],
Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
]
] = None,
metric: Optional[Union[str, List[str]]] = None,
mode: Optional[Union[str, List[str]]] = None,
points_to_evaluate: Optional[List[Dict]] = None,
sampler: Optional["BaseSampler"] = None,
seed: Optional[int] = None,
evaluated_rewards: Optional[List] = None,
):
assert ot is not None, "Optuna must be installed! Run `pip install optuna`."
super(OptunaSearch, self).__init__(metric=metric, mode=mode)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self).__name__))
space = self.convert_search_space(space)
else:
# Flatten to support nested dicts
space = flatten_dict(space, "/")
self._space = space
self._points_to_evaluate = points_to_evaluate or []
self._evaluated_rewards = evaluated_rewards
self._study_name = "optuna" # Fixed study name for in-memory storage
if sampler and seed:
logger.warning(
"You passed an initialized sampler to `OptunaSearch`. The "
"`seed` parameter has to be passed to the sampler directly "
"and will be ignored."
)
elif sampler:
assert isinstance(sampler, BaseSampler), (
"You can only pass an instance of " "`optuna.samplers.BaseSampler` " "as a sampler to `OptunaSearcher`."
)
self._sampler = sampler
self._seed = seed
self._completed_trials = set()
self._ot_trials = {}
self._ot_study = None
if self._space:
self._setup_study(mode)
def _setup_study(self, mode: Union[str, list]):
if self._metric is None and self._mode:
if isinstance(self._mode, list):
raise ValueError(
"If ``mode`` is a list (multi-objective optimization " "case), ``metric`` must be defined."
)
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
pruner = ot.pruners.NopPruner()
storage = ot.storages.InMemoryStorage()
try:
from packaging import version
except ImportError:
raise ImportError("To use BlendSearch, run: pip install flaml[blendsearch]")
if self._sampler:
sampler = self._sampler
elif isinstance(mode, list) and version.parse(ot.__version__) < version.parse("2.9.0"):
# MOTPESampler deprecated in Optuna>=2.9.0
sampler = ot.samplers.MOTPESampler(seed=self._seed)
else:
sampler = ot.samplers.TPESampler(seed=self._seed)
if isinstance(mode, list):
study_direction_args = dict(
directions=["minimize" if m == "min" else "maximize" for m in mode],
)
else:
study_direction_args = dict(
direction="minimize" if mode == "min" else "maximize",
)
self._ot_study = ot.study.create_study(
storage=storage,
sampler=sampler,
pruner=pruner,
study_name=self._study_name,
load_if_exists=True,
**study_direction_args,
)
if self._points_to_evaluate:
validate_warmstart(
self._space,
self._points_to_evaluate,
self._evaluated_rewards,
validate_point_name_lengths=not callable(self._space),
)
if self._evaluated_rewards:
for point, reward in zip(self._points_to_evaluate, self._evaluated_rewards):
self.add_evaluated_point(point, reward)
else:
for point in self._points_to_evaluate:
self._ot_study.enqueue_trial(point)
def set_search_properties(self, metric: Optional[str], mode: Optional[str], config: Dict, **spec) -> bool:
if self._space:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_study(self._mode)
return True
def _suggest_from_define_by_run_func(
self,
func: Callable[["OptunaTrial"], Optional[Dict[str, Any]]],
ot_trial: "OptunaTrial",
) -> Dict:
captor = _OptunaTrialSuggestCaptor(ot_trial)
time_start = time.time()
ret = func(captor)
time_taken = time.time() - time_start
if time_taken > DEFINE_BY_RUN_WARN_THRESHOLD_S:
warnings.warn(
"Define-by-run function passed in the `space` argument "
f"took {time_taken} seconds to "
"run. Ensure that actual computation, training takes "
"place inside Tune's train functions or Trainables "
"passed to `tune.run`."
)
if ret is not None:
if not isinstance(ret, dict):
raise TypeError(
"The return value of the define-by-run function "
"passed in the `space` argument should be "
"either None or a `dict` with `str` keys. "
f"Got {type(ret)}."
)
if not all(isinstance(k, str) for k in ret.keys()):
raise TypeError(
"At least one of the keys in the dict returned by the "
"define-by-run function passed in the `space` argument "
"was not a `str`."
)
return {**captor.captured_values, **ret} if ret else captor.captured_values
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._space:
raise RuntimeError(UNDEFINED_SEARCH_SPACE.format(cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(cls=self.__class__.__name__, metric=self._metric, mode=self._mode)
)
if callable(self._space):
# Define-by-run case
if trial_id not in self._ot_trials:
self._ot_trials[trial_id] = self._ot_study.ask()
ot_trial = self._ot_trials[trial_id]
params = self._suggest_from_define_by_run_func(self._space, ot_trial)
else:
# Use Optuna ask interface (since version 2.6.0)
if trial_id not in self._ot_trials:
self._ot_trials[trial_id] = self._ot_study.ask(fixed_distributions=self._space)
ot_trial = self._ot_trials[trial_id]
params = ot_trial.params
return unflatten_dict(params)
def on_trial_result(self, trial_id: str, result: Dict):
if isinstance(self.metric, list):
# Optuna doesn't support incremental results
# for multi-objective optimization
return
if trial_id in self._completed_trials:
logger.warning(
f"Received additional result for trial {trial_id}, but " f"it already finished. Result: {result}"
)
return
metric = result[self.metric]
step = result[TRAINING_ITERATION]
ot_trial = self._ot_trials[trial_id]
ot_trial.report(metric, step)
def on_trial_complete(self, trial_id: str, result: Optional[Dict] = None, error: bool = False):
if trial_id in self._completed_trials:
logger.warning(
f"Received additional completion for trial {trial_id}, but " f"it already finished. Result: {result}"
)
return
ot_trial = self._ot_trials[trial_id]
if result:
if isinstance(self.metric, list):
val = [result.get(metric, None) for metric in self.metric]
else:
val = result.get(self.metric, None)
else:
val = None
ot_trial_state = OptunaTrialState.COMPLETE
if val is None:
if error:
ot_trial_state = OptunaTrialState.FAIL
else:
ot_trial_state = OptunaTrialState.PRUNED
try:
self._ot_study.tell(ot_trial, val, state=ot_trial_state)
except Exception as exc:
logger.warning(exc) # E.g. if NaN was reported
self._completed_trials.add(trial_id)
def add_evaluated_point(
self,
parameters: Dict,
value: float,
error: bool = False,
pruned: bool = False,
intermediate_values: Optional[List[float]] = None,
):
if not self._space:
raise RuntimeError(UNDEFINED_SEARCH_SPACE.format(cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(cls=self.__class__.__name__, metric=self._metric, mode=self._mode)
)
if callable(self._space):
raise TypeError(
"Define-by-run function passed in `space` argument is not "
"yet supported when using `evaluated_rewards`. Please provide "
"an `OptunaDistribution` dict or pass a Ray Tune "
"search space to `tune.run()`."
)
ot_trial_state = OptunaTrialState.COMPLETE
if error:
ot_trial_state = OptunaTrialState.FAIL
elif pruned:
ot_trial_state = OptunaTrialState.PRUNED
if intermediate_values:
intermediate_values_dict = {i: value for i, value in enumerate(intermediate_values)}
else:
intermediate_values_dict = None
trial = ot.trial.create_trial(
state=ot_trial_state,
value=value,
params=parameters,
distributions=self._space,
intermediate_values=intermediate_values_dict,
)
self._ot_study.add_trial(trial)
def save(self, checkpoint_path: str):
save_object = (
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
self._evaluated_rewards,
)
with open(checkpoint_path, "wb") as outputFile:
pickle.dump(save_object, outputFile)
def restore(self, checkpoint_path: str):
with open(checkpoint_path, "rb") as inputFile:
save_object = pickle.load(inputFile)
if len(save_object) == 5:
(
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
self._evaluated_rewards,
) = save_object
else:
# Backwards compatibility
(
self._sampler,
self._ot_trials,
self._ot_study,
self._points_to_evaluate,
) = save_object
@staticmethod
def convert_search_space(spec: Dict) -> Dict[str, Any]:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if not domain_vars and not grid_vars:
return {}
if grid_vars:
raise ValueError("Grid search parameters cannot be automatically converted " "to an Optuna search space.")
# Flatten and resolve again after checking for grid search.
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
def resolve_value(domain: Domain) -> ot.distributions.BaseDistribution:
quantize = None
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
quantize = sampler.q
sampler = sampler.sampler
if isinstance(sampler, LogUniform):
logger.warning(
"Optuna does not handle quantization in loguniform "
"sampling. The parameter will be passed but it will "
"probably be ignored."
)
if isinstance(domain, Float):
if isinstance(sampler, LogUniform):
if quantize:
logger.warning(
"Optuna does not support both quantization and "
"sampling from LogUniform. Dropped quantization."
)
return ot.distributions.LogUniformDistribution(domain.lower, domain.upper)
elif isinstance(sampler, Uniform):
if quantize:
return ot.distributions.DiscreteUniformDistribution(domain.lower, domain.upper, quantize)
return ot.distributions.UniformDistribution(domain.lower, domain.upper)
elif isinstance(domain, Integer):
if isinstance(sampler, LogUniform):
return ot.distributions.IntLogUniformDistribution(
domain.lower, domain.upper - 1, step=quantize or 1
)
elif isinstance(sampler, Uniform):
# Upper bound should be inclusive for quantization and
# exclusive otherwise
return ot.distributions.IntUniformDistribution(
domain.lower,
domain.upper - int(bool(not quantize)),
step=quantize or 1,
)
elif isinstance(domain, Categorical):
if isinstance(sampler, Uniform):
return ot.distributions.CategoricalDistribution(domain.categories)
raise ValueError(
"Optuna search does not support parameters of type "
"`{}` with samplers of type `{}`".format(type(domain).__name__, type(domain.sampler).__name__)
)
# Parameter name is e.g. "a/b/c" for nested dicts
values = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
return values | PypiClean |
/Buildpan_CLI-1.0-py3-none-any.whl/buildpan/previous_commit.py | import subprocess
from buildpan import setting, access_token, read_file, workflow
import requests, datetime
info = setting.info
# getting env variable
get_sha = info["GET_SHA_URL"]
fetch_log = info["FETCH_LOG_URL"]
def prev_commit(path, repo_name, project_id, username, provider):
try:
curtime = datetime.datetime.now()
        # request the sha of the previous commit from the backend service
response = requests.get(get_sha, repo_name)
res=response.content
res=str(res)
index=res.index("'")
index1=res.index("'",index+1)
res=res[index+1:index1]
# restoring to previous commit
if provider == "github":
subprocess.call(["git", "fetch", "origin", res], cwd=path)
result = subprocess.run(["git", "checkout", "FETCH_HEAD"], stdout= subprocess.PIPE, stderr = subprocess.STDOUT, cwd=path)
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=Pull = '+str(result.stdout.decode())+'&status=success&operation=previous commit')
elif provider == "bitbucket":
refresh_token = read_file.read_file(path, project_id)
token = access_token.access_token(refresh_token)
pull = ["git", "-c", f"http.extraHeader=Authorization: Bearer {token}", "fetch", "origin", res]
subprocess.run(pull, cwd=path)
print(path)
subprocess.call(["git", "-c", f"http.extraHeader=Authorization: Bearer {token}", "checkout", "FETCH_HEAD"], cwd=path)
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=pull operation performed'+'&status=success&operation=previous commit')
workflow.workflows(path, project_id, repo_name, username)
except Exception as e:
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=Pull = '+str(e)+'&status=failed&operation=previous commit') | PypiClean |
/MezzanineFor1.7-3.1.10.tar.gz/MezzanineFor1.7-3.1.10/mezzanine/twitter/models.py | from __future__ import unicode_literals
from future.builtins import str
from datetime import datetime
import re
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import urlize
from django.utils.timezone import make_aware, utc
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
class TwitterQueryException(Exception):
pass
@python_2_unicode_compatible
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
urls = {
QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")),
QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
settings.use_editable()
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json['user']
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json['user']['screen_name']
tweet.full_name = tweet_json['user']['name']
tweet.profile_image_url = \
tweet_json['user']["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = ''.join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
tweet.created_at = make_aware(d, utc)
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
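# Illustrative example (not part of the original module): the same
# linkification applied to tweet text in Query.run() above, shown on a static,
# made-up string.
#
# text = urlize("Reading #django news from @mezzanine at http://example.com")
# text = re_usernames.sub(replace_usernames, text)
# text = re_hashtags.sub(replace_hashtags, text)
# # -> the URL, "@mezzanine" and "#django" each end up wrapped in <a> tags
# #    pointing at example.com, twitter.com/mezzanine and a twitter search.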
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True)
query = models.ForeignKey("Query", related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None | PypiClean |
/MolGNN_update-0.0.2-py3-none-any.whl/MolToGraph/MolToGraph.py | from numbers import Number
from typing import Sequence, Any, List
import numpy as np
import torch
from rdkit import Chem
from rdkit.Chem.rdmolops import GetAdjacencyMatrix
from torch_geometric.data import Data
def one_hot_encoding(x: Any, values: Sequence[Any]) -> List[int]:
"""
Sparse one-hot encoding of an input value, given a list of possible
    values. If x is not in values, it is mapped to the last entry of values,
    which therefore serves as the catch-all ("unknown") slot.
Args:
x (Any)
values (Sequence[Any]): Possible values
Returns:
binary_encoding (List[int]): Sparse one-hot vector
"""
if x not in values:
x = values[-1]
binary_encoding = [int(v == x) for v in values]
return binary_encoding
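def _one_hot_encoding_example() -> None:
    """Illustrative only, not part of the original module: the last entry of
    `values` acts as the catch-all slot for inputs outside the allowed list."""
    assert one_hot_encoding('N', ['C', 'N', 'O', 'Unknown']) == [0, 1, 0, 0]
    assert one_hot_encoding('Se', ['C', 'N', 'O', 'Unknown']) == [0, 0, 0, 1]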
def get_atom_features(
atom: Chem.Atom,
use_chirality: bool = True,
implicit_hydrogens: bool = True) -> np.ndarray:
"""
Featurize atom
Args:
atom (Chem.Atom): Atom
use_chirality (bool)
implicit_hydrogens (bool)
Returns:
atom_feature_vector (np.ndarray)
"""
allowed_elements = [
'C', 'N', 'O', 'S', 'F', 'Si', 'P', 'Cl', 'Br', 'Mg', 'Na', 'Ca', 'Fe', 'As', 'Al', 'I',
'B', 'V', 'K', 'Tl', 'Yb', 'Sb', 'Sn', 'Ag', 'Pd', 'Co', 'Se', 'Ti', 'Zn', 'Li', 'Ge',
'Cu', 'Au', 'Ni', 'Cd', 'In', 'Mn', 'Zr', 'Cr', 'Pt', 'Hg', 'Pb', 'Unknown'
]
if not implicit_hydrogens:
allowed_elements = ['H'] + allowed_elements
# compute atom features
atom_type_enc = one_hot_encoding(str(atom.GetSymbol()), allowed_elements)
n_heavy_neighbors_enc = one_hot_encoding(int(atom.GetDegree()), [0, 1, 2, 3, 4, "MoreThanFour"])
formal_charge_enc = one_hot_encoding(int(atom.GetFormalCharge()), [-3, -2, -1, 0, 1, 2, 3, "Extreme"])
hybridisation_type_enc = one_hot_encoding(str(atom.GetHybridization()),
["S", "SP", "SP2", "SP3", "SP3D", "SP3D2", "OTHER"])
is_in_a_ring_enc = [int(atom.IsInRing())]
is_aromatic_enc = [int(atom.GetIsAromatic())]
atomic_mass_scaled = [float((atom.GetMass() - 10.812) / 116.092)]
vdw_radius_scaled = [float((Chem.GetPeriodicTable().GetRvdw(atom.GetAtomicNum()) - 1.5) / 0.6)]
covalent_radius_scaled = [float((Chem.GetPeriodicTable().GetRcovalent(atom.GetAtomicNum()) - 0.64) / 0.76)]
atom_feature_vector = \
atom_type_enc + n_heavy_neighbors_enc + \
formal_charge_enc + hybridisation_type_enc + \
is_in_a_ring_enc + is_aromatic_enc + \
atomic_mass_scaled + vdw_radius_scaled + \
covalent_radius_scaled
if use_chirality:
chirality_type_enc = one_hot_encoding(str(atom.GetChiralTag()),
["CHI_UNSPECIFIED", "CHI_TETRAHEDRAL_CW", "CHI_TETRAHEDRAL_CCW",
"CHI_OTHER"])
atom_feature_vector += chirality_type_enc
if implicit_hydrogens:
n_hydrogens_enc = one_hot_encoding(int(atom.GetTotalNumHs()), [0, 1, 2, 3, 4, "MoreThanFour"])
atom_feature_vector += n_hydrogens_enc
return np.array(atom_feature_vector)
def get_bond_features(bond: Chem.Bond,
use_stereochemistry: bool = True) -> np.ndarray:
"""
Featurize bond
Args:
bond (Chem.Bond): Bond
use_stereochemistry (bool)
Returns:
bond_feature_vector (np.ndarray)
"""
allowed_bonds = [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
bond_type_enc = one_hot_encoding(bond.GetBondType(), allowed_bonds)
bond_is_conj_enc = [int(bond.GetIsConjugated())]
bond_is_in_ring_enc = [int(bond.IsInRing())]
bond_feature_vector = bond_type_enc + bond_is_conj_enc + bond_is_in_ring_enc
if use_stereochemistry:
stereo_type_enc = one_hot_encoding(str(bond.GetStereo()), ["STEREOZ", "STEREOE", "STEREOANY", "STEREONONE"])
bond_feature_vector += stereo_type_enc
return np.array(bond_feature_vector)
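# Illustrative usage (not part of the original module; assumes RDKit is
# available): featurising one atom and one bond of ethanol. The exact vector
# lengths are fixed by the option flags of the two functions above.
#
# mol = Chem.MolFromSmiles('CCO')
# atom_vec = get_atom_features(mol.GetAtomWithIdx(0))          # 1-D np.ndarray
# bond_vec = get_bond_features(mol.GetBondBetweenAtoms(0, 1))  # 1-D np.ndarray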
def create_pyg_data_lst(x_smiles: Sequence[str], y: Sequence[Number], device: str = 'cpu') -> List[Data]:
"""
Package a sequence of smiles strings and labels as a list
of PyTorch geometric data objects, containing the molecule as graph
Args:
x_smiles (Sequence[str])
y (Sequence[Number])
device (str)
Returns:
data_list (List[Data]): List of PyTorch geometric Data objects
"""
# We use this hack to determine the number of edge and node features
unrelated_smiles = "O=O"
unrelated_mol = Chem.MolFromSmiles(unrelated_smiles)
n_node_features = len(get_atom_features(unrelated_mol.GetAtomWithIdx(0)))
n_edge_features = len(get_bond_features(unrelated_mol.GetBondBetweenAtoms(0, 1)))
data_list = []
for smiles, label in zip(x_smiles, y):
# convert SMILES to RDKit mol object
mol = Chem.MolFromSmiles(smiles)
# get feature dimensions
n_nodes = mol.GetNumAtoms()
n_edges = 2 * mol.GetNumBonds()
# construct node feature matrix X of shape (n_nodes, n_node_features)
X = np.zeros((n_nodes, n_node_features))
for atom in mol.GetAtoms():
X[atom.GetIdx(), :] = get_atom_features(atom)
X = torch.tensor(X, dtype=torch.float)
# construct edge index array E of shape (2, n_edges)
(rows, cols) = np.nonzero(GetAdjacencyMatrix(mol))
torch_rows = torch.from_numpy(rows.astype(np.int64)).to(torch.long)
torch_cols = torch.from_numpy(cols.astype(np.int64)).to(torch.long)
E = torch.stack([torch_rows, torch_cols], dim=0)
# construct edge feature array EF of shape (n_edges, n_edge_features)
EF = np.zeros((n_edges, n_edge_features))
for (k, (i, j)) in enumerate(zip(rows, cols)):
EF[k] = get_bond_features(mol.GetBondBetweenAtoms(int(i), int(j)))
EF = torch.tensor(EF, dtype=torch.float)
# construct label tensor
y_tensor = torch.tensor(np.array([label]), dtype=torch.float)
# construct Pytorch Geometric data object and append to data list
data_list.append(Data(x=X, edge_index=E, edge_attr=EF, y=y_tensor).to(device))
    return data_list
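# Example (sketch): packaging two molecules with scalar labels as PyTorch Geometric
# graphs; the SMILES strings and labels below are illustrative placeholders.
#
#   smiles = ["CCO", "c1ccccc1"]   # ethanol, benzene
#   labels = [0.5, 1.2]
#   graphs = create_pyg_data_lst(smiles, labels, device="cpu")
#   graphs[0].x.shape           # -> (n_atoms, n_node_features)
#   graphs[0].edge_index.shape  # -> (2, 2 * n_bonds)
#   graphs[0].edge_attr.shape   # -> (2 * n_bonds, n_edge_features)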
# /Curare-0.4.5-py3-none-any.whl/curare/lib/parse_versions.py
# NOTE: minimal docopt usage block reconstructed from the options read in main();
# the original module docstring is not part of this excerpt.
"""Usage: parse_versions.py --conda-dir=<dir> --pipeline=<file> --output=<file>"""
import datetime
import json
import re
import yaml
from docopt import docopt
from os import listdir
from pathlib import Path
from typing import Dict, List, Set, Tuple, Any
import sys
ANALYSIS_STEPS: Tuple[str, str, str, str] = (
'preprocessing',
'premapping',
'mapping',
'analysis'
)
def parse_config_yml(pipeline_file: Path):
steps: Dict[str, str] = {}
with pipeline_file.open() as yaml_config:
yaml_config: Dict[Any, Any] = yaml.load(yaml_config, Loader=yaml.BaseLoader)
for step in ANALYSIS_STEPS:
if step in yaml_config:
if 'modules' in yaml_config[step]:
if isinstance(yaml_config[step]['modules'], str):
steps[yaml_config[step]['modules']] = step
else:
for module in yaml_config[step]['modules']:
steps[module] = step
return steps
def get_specified_tools(log_str: str):
#e.g. "# update specs: ["python[version='>=3.4']", 'pandas==1.3.3', "matplotlib[version='>=3.2.1']", 'xlsxwriter==3.0.1', 'bioconductor-deseq2']"
primary_tools_tmp: List[str] = log_str[len("# update specs: "):].strip("\n \t[]").split(", ")
primary_tools: List[Tuple[str, str]] = []
for tool in primary_tools_tmp:
name: str
version: str
if "[version='" in tool:
name = tool[:tool.index("[")].strip('"\' ')
version = tool[tool.index("=")+1:].strip('"\' ]')
elif "==" in tool:
name, version = tool.strip('"\' ').split("==")
else:
name = tool.strip('"\' ')
version = ""
primary_tools.append(name)
return primary_tools
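# For the example history line quoted above, get_specified_tools() returns
# ['python', 'pandas', 'matplotlib', 'xlsxwriter', 'bioconductor-deseq2']:
# the version constraints are stripped and only the package names are kept.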
def get_dependencies(dependencies: List[str]):
primary_dependencies = []
secondary_dependencies = []
re_tool = re.compile(
r'[+-](?P<repository>.*)::(?P<tool>[a-zA-Z0-9-_\.]+)-(?P<version>[0-9.a-z_]+)-(?P<hash>[a-z0-9_]*)')
module_date: datetime.datetime = datetime.datetime.strptime(dependencies[0].strip().lstrip(' ==>').rstrip('<== '), "%Y-%m-%d %H:%M:%S")
specified_tools = get_specified_tools(dependencies[-1])
for line in dependencies:
if line.startswith("+"):
match = re_tool.search(line)
dependency = {
'full_dependency': line[1:].strip(),
'repository': match.group('repository'),
'tool': match.group('tool'),
'version': match.group('version'),
'hash': match.group('hash')
}
if match.group('tool') in specified_tools:
primary_dependencies.append(dependency)
else:
secondary_dependencies.append(dependency)
return primary_dependencies, secondary_dependencies, module_date
def main():
args = docopt(__doc__, version='1.0')
conda_dir = Path(args["--conda-dir"]).resolve()
pipeline_file = Path(args["--pipeline"])
output_json = Path(args["--output"]).resolve()
steps = parse_config_yml(pipeline_file)
output_list: List[Dict[str, Any]] = []
for file in [f for f in listdir(conda_dir) if f.endswith('.yaml')]:
print(file)
with open(conda_dir / file, 'r') as yaml_file:
first_line = yaml_file.readline()
if first_line.startswith("# module:") or first_line.startswith("#module:"):
module = first_line.split(": ")[1].strip()
# Environment exists but was not used in pipeline
if module not in steps:
continue
conda_env = file.split(".")[0]
with open(conda_dir / conda_env / 'conda-meta' / 'history', 'r') as history_file:
dependencies: List[str] = history_file.readlines()
primary_dependencies, secondary_dependencies, date = get_dependencies(dependencies)
step = steps[module] if module in steps else ''
output_list.append({
'name': module,
'step': step,
'primaryDependencies': primary_dependencies,
'secondaryDependencies': secondary_dependencies,
'date': date
})
to_delete: Set[int] = set()
for i, module in enumerate(output_list):
for j, other_module in enumerate(output_list[i+1:]):
if module['name'] == other_module['name'] and module['step'] == other_module['step']:
if module['date'] > other_module['date']:
to_delete.add(i+j+1)
else:
to_delete.add(i)
for module in sorted(to_delete, reverse=True):
del output_list[module]
for module in output_list:
del module['date']
with output_json.open('w') as f:
json.dump(output_list, f, indent=4)
if __name__ == '__main__':
    main()
# /DI_engine-0.4.9-py3-none-any.whl/dizoo/atari/config/serial/qbert/qbert_acer_config.py
from easydict import EasyDict
qbert_acer_config = dict(
exp_name='qbert_acer_seed0',
env=dict(
collector_env_num=16,
evaluator_env_num=8,
n_evaluator_episode=8,
stop_value=int(1e6),
env_id='QbertNoFrameskip-v4',
        # 'ALE/Qbert-v5' is also available, but extra settings are needed after gym.make().
frame_stack=4,
manager=dict(shared_memory=False, )
),
policy=dict(
cuda=True,
priority=False,
model=dict(
obs_shape=[4, 84, 84],
action_shape=6,
encoder_hidden_size_list=[128, 128, 512],
critic_head_hidden_size=512,
critic_head_layer_num=2,
actor_head_hidden_size=512,
actor_head_layer_num=2
),
unroll_len=64,
learn=dict(
# (int) collect n_sample data, train model update_per_collect times
# here we follow impala serial pipeline
update_per_collect=10,
# (int) the number of data for a train iteration
batch_size=64,
# grad_clip_type='clip_norm',
learning_rate_actor=0.0001,
learning_rate_critic=0.0003,
# (float) loss weight of the entropy regularization, the weight of policy network is set to 1
entropy_weight=0.01,
            # (float) discount factor for future reward, defaults in [0, 1]
            discount_factor=0.99,
            # (bool) whether to constrain the policy update with a trust region
            trust_region=True,
# (float) clip ratio of importance weights
c_clip_ratio=10,
),
collect=dict(
# (int) collect n_sample data, train model n_iteration times
n_sample=64,
            # (float) discount factor for future reward, defaults in [0, 1]
discount_factor=0.99,
collector=dict(collect_print_freq=1000, ),
),
eval=dict(evaluator=dict(eval_freq=1000, )),
other=dict(replay_buffer=dict(replay_buffer_size=3000, ), ),
),
)
main_config = EasyDict(qbert_acer_config)
qbert_acer_create_config = dict(
env=dict(
type='atari',
import_names=['dizoo.atari.envs.atari_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='acer'),
)
create_config = EasyDict(qbert_acer_create_config)
if __name__ == "__main__":
# or you can enter ding -m serial -c qbert_acer_config.py -s 0
from ding.entry import serial_pipeline
    serial_pipeline([main_config, create_config], seed=0)
# /ORE_strhub-0.0.1-py3-none-any.whl/strhub/models/abinet/attention.py
import torch
import torch.nn as nn
from .transformer import PositionalEncoding
class Attention(nn.Module):
def __init__(self, in_channels=512, max_length=25, n_feature=256):
super().__init__()
self.max_length = max_length
self.f0_embedding = nn.Embedding(max_length, in_channels)
self.w0 = nn.Linear(max_length, n_feature)
self.wv = nn.Linear(in_channels, in_channels)
self.we = nn.Linear(in_channels, max_length)
self.active = nn.Tanh()
self.softmax = nn.Softmax(dim=2)
def forward(self, enc_output):
enc_output = enc_output.permute(0, 2, 3, 1).flatten(1, 2)
reading_order = torch.arange(self.max_length, dtype=torch.long, device=enc_output.device)
reading_order = reading_order.unsqueeze(0).expand(enc_output.size(0), -1) # (S,) -> (B, S)
reading_order_embed = self.f0_embedding(reading_order) # b,25,512
t = self.w0(reading_order_embed.permute(0, 2, 1)) # b,512,256
t = self.active(t.permute(0, 2, 1) + self.wv(enc_output)) # b,256,512
attn = self.we(t) # b,256,25
attn = self.softmax(attn.permute(0, 2, 1)) # b,25,256
g_output = torch.bmm(attn, enc_output) # b,25,512
return g_output, attn.view(*attn.shape[:2], 8, 32)
def encoder_layer(in_c, out_c, k=3, s=2, p=1):
return nn.Sequential(nn.Conv2d(in_c, out_c, k, s, p),
nn.BatchNorm2d(out_c),
nn.ReLU(True))
def decoder_layer(in_c, out_c, k=3, s=1, p=1, mode='nearest', scale_factor=None, size=None):
align_corners = None if mode == 'nearest' else True
return nn.Sequential(nn.Upsample(size=size, scale_factor=scale_factor,
mode=mode, align_corners=align_corners),
nn.Conv2d(in_c, out_c, k, s, p),
nn.BatchNorm2d(out_c),
nn.ReLU(True))
class PositionAttention(nn.Module):
def __init__(self, max_length, in_channels=512, num_channels=64,
h=8, w=32, mode='nearest', **kwargs):
super().__init__()
self.max_length = max_length
self.k_encoder = nn.Sequential(
encoder_layer(in_channels, num_channels, s=(1, 2)),
encoder_layer(num_channels, num_channels, s=(2, 2)),
encoder_layer(num_channels, num_channels, s=(2, 2)),
encoder_layer(num_channels, num_channels, s=(2, 2))
)
self.k_decoder = nn.Sequential(
decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
decoder_layer(num_channels, num_channels, scale_factor=2, mode=mode),
decoder_layer(num_channels, in_channels, size=(h, w), mode=mode)
)
self.pos_encoder = PositionalEncoding(in_channels, dropout=0., max_len=max_length)
self.project = nn.Linear(in_channels, in_channels)
def forward(self, x):
N, E, H, W = x.size()
k, v = x, x # (N, E, H, W)
# calculate key vector
features = []
for i in range(0, len(self.k_encoder)):
k = self.k_encoder[i](k)
features.append(k)
for i in range(0, len(self.k_decoder) - 1):
k = self.k_decoder[i](k)
k = k + features[len(self.k_decoder) - 2 - i]
k = self.k_decoder[-1](k)
# calculate query vector
# TODO q=f(q,k)
zeros = x.new_zeros((self.max_length, N, E)) # (T, N, E)
q = self.pos_encoder(zeros) # (T, N, E)
q = q.permute(1, 0, 2) # (N, T, E)
q = self.project(q) # (N, T, E)
# calculate attention
attn_scores = torch.bmm(q, k.flatten(2, 3)) # (N, T, (H*W))
attn_scores = attn_scores / (E ** 0.5)
attn_scores = torch.softmax(attn_scores, dim=-1)
v = v.permute(0, 2, 3, 1).view(N, -1, E) # (N, (H*W), E)
attn_vecs = torch.bmm(attn_scores, v) # (N, T, E)
        return attn_vecs, attn_scores.view(N, -1, H, W)
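# Minimal shape check (a sketch; assumes the default 512-channel, 8x32 feature maps
# used above):
#
#   pos_attn = PositionAttention(max_length=26)
#   feats = torch.randn(2, 512, 8, 32)
#   vecs, scores = pos_attn(feats)
#   # vecs:   (2, 26, 512) -- one aggregated feature vector per character position
#   # scores: (2, 26, 8, 32) -- spatial attention map per character position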
# /Group%20Buy%20Organizer-1.0.3.tar.gz/Group Buy Organizer-1.0.3/groupbuyorganizer/general/routes.py
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required, login_user, logout_user
from groupbuyorganizer import database, bcrypt
from groupbuyorganizer.admin.models import Instance, User
from groupbuyorganizer.admin.utilities import HomeEvent
from groupbuyorganizer.events.forms import CreateEventForm
from groupbuyorganizer.events.models import Event
from groupbuyorganizer.general.forms import LoginForm, RegistrationForm, UserOptionsForm
general = Blueprint('general', __name__)
@general.route("/events/")
@general.route("/", methods=['GET', 'POST'])
def home():
form = CreateEventForm()
instance = Instance.query.first()
events = Event.query.order_by(Event.date_created.desc()).all()
home_event_list = []
for event in events:
home_event_list.append(HomeEvent(event))
if form.validate_on_submit():
event = Event(name=form.event_name.data, added_by=current_user.id)
database.session.add(event)
database.session.commit()
flash('Event created!', 'success')
return redirect(url_for('general.home'))
return render_template('home.html', root_created = instance.root_created, home_event_list=home_event_list,
registration_enabled = instance.registration_enabled, events=events, form=form,
users_can_see_master_overview=instance.users_can_see_master_overview)
@general.route("/about/")
def about():
return render_template('about.html', title='About')
@general.route("/register/", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
flash('You are already logged in!', 'info')
return redirect(url_for('general.home'))
instance = Instance.query.first()
form = RegistrationForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=hashed_password)
instance = Instance.query.first()
if instance.root_created == False:
user.is_root = True
user.is_admin = True
instance.root_created = True
flash('Your account has been created!', 'success')
else:
flash(f'Your account has been created, you can now log in', 'success')
database.session.add(user)
database.session.commit()
return redirect(url_for('general.login'))
return render_template('register.html', title='Join Today', form=form,
registration_enabled=instance.registration_enabled)
@general.route("/login/", methods=['GET', 'POST'])
def login():
instance = Instance.query.first()
if current_user.is_authenticated:
return redirect(url_for('general.home'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
if user.disabled:
flash('This account has been disabled.', 'danger')
return redirect(url_for('general.home'))
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('general.home'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('login.html', title='Log In', form=form, registration_enabled=instance.registration_enabled)
@general.route("/logout/")
def logout():
logout_user()
return redirect(url_for('general.home'))
@general.route("/account/", methods=['GET', 'POST'])
@login_required
def account():
form = UserOptionsForm()
if form.validate_on_submit():
current_user.username = form.username.data
current_user.email = form.email.data
database.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('general.account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('account.html', title=f'{current_user.username} Account Settings', form=form)
@general.route("/help/")
def help():
    return render_template('help.html', title='Help')
# /MetaCerberus-1.1.tar.gz/MetaCerberus-1.1/lib/metacerberus_prostats.py
from pathlib import Path
import pandas as pd
import statistics as stat
def getStats(faa: str, fileHmmer: str, dfCount: dict, config: dict, summary_out:str):
minscore = config["MINSCORE"]
# sum up proteins in FASTA file
proteins = {}
with open(faa, "r") as reader:
name = ""
line = reader.readline()
while line:
if line.startswith('>'):
name = line[1:].rstrip().split(sep=None, maxsplit=1)[0]
length = 0
line = reader.readline()
while line:
if line.startswith('>'):
break
length += len(line.strip())
line = reader.readline()
proteins[name] = dict(count=0, found=0, length=length)
continue
line = reader.readline()
# sum up proteins in HMMER file
hmmHits = dict()
with open(fileHmmer, "r") as reader:
for i,line in enumerate(reader,1):
#"target", "query", "e-value", "score", "length", "start", "end"
line = line.split('\t')
try:
target = line[0]
query = line[1]
evalue = line[2]
score = float(line[3])
length = int(line[4])
except:
continue
if target in proteins:
proteins[target]['count'] += 1
if score >= minscore:
proteins[target]['found'] += 1
if target not in hmmHits:
hmmHits[target] = list()
hmmHits[target].append([query, score, evalue, length])
else:
print("ERROR: Target on line", i, "of HMMER file not in protein fasta:", fileHmmer)
return None
# Annotate proteins
dfLookup = dict()
for dbname in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']:
dbPath = Path(config['PATHDB'], f"{dbname}-onto_rel1.tsv")
dfLookup[dbname] = pd.read_csv(dbPath, sep='\t').fillna('')
with open(summary_out, 'w') as writer:
print("locus_tag", 'FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG', "Best hit", "length_bp", "e-value", "score", "EC_number", "product", sep='\t', file=writer)
for target in proteins.keys():
if target in hmmHits:
hmmHits[target].sort(key = lambda x: x[1], reverse=True)
annotations = dict()
for match in hmmHits[target]:
q,s,e,l = match
for dbname in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']:
if dbname not in annotations:
annotations[dbname] = []
rows = pd.DataFrame(dfLookup[dbname][dfLookup[dbname].ID==q])
if not rows.empty:
name = f"{dbname}:{rows.iloc[0].Function}"
EC = rows.iloc[0].EC
annotations[dbname] += [q]
annotations = [", ".join(annotations[k]) for k in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']]
query,score,evalue,length = hmmHits[target][0]
name = ""
EC = ""
for dbname in ['FOAM', 'KEGG', 'COG', 'CAZy', 'PHROG', 'VOG']:
rows = pd.DataFrame(dfLookup[dbname][dfLookup[dbname].ID==query])
if not rows.empty:
name = rows.iloc[0].Function
EC = rows.iloc[0].EC
break
if name:
print(target, *annotations, f"{dbname}:{query}", length, evalue, score, EC, name, sep='\t', file=writer)
else:
print(target, '', '', '', '', '', '', "", "", "", "", "", "Hypothetical", sep='\t', file=writer)
else:
print(target, '', '', '', '', '', '', "", "", "", "", "", "Hypothetical", sep='\t', file=writer)
del dfLookup
# calculate stats
lengths = [ item['length'] for item in proteins.values() ]
found = [ v['found'] for k,v in proteins.items() if v['found']>1 ]
stats = {
"Protein Count (Total)": len(proteins),
f"Protein Count (>Min Score)": len(found),
"% Proteins > Min Score": 0 if not len(proteins) else round(100.0*len(found)/len(proteins), 2),
"Average Protein Length": 0 if not len(lengths) else round(stat.mean(lengths), 2)
}
for dbName,filepath in dfCount.items():
if Path(filepath).exists():
df = pd.read_csv(filepath, sep='\t')
stats[dbName+' ID Count'] = df[df['Level']=='Function']['Count'].sum()
    return stats
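# Example call (sketch; all paths and the dfCount mapping are placeholders):
#
#   stats = getStats(faa="proteins.faa",
#                    fileHmmer="hmmer_hits.tsv",
#                    dfCount={"KEGG": "kegg_rollup.tsv"},
#                    config={"MINSCORE": 25, "PATHDB": "path/to/DB"},
#                    summary_out="annotation_summary.tsv")
#
# Only the config keys MINSCORE and PATHDB are read here; dfCount maps database
# names to rollup count tables whose 'Function'-level counts are summed into stats.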
// /Firefly-vis-2.0.4.tar.gz/src/Firefly/static/js/viewer/applyUISelections.js
function resetViewer(){
viewerParams.reset = true;
viewerParams.ready = false;
console.log('Reset options', viewerParams.parts.options)
//reset all the parts specific values to the initial ones
initPVals();
initScene();
drawScene();
//recreate the GUI
clearInterval(viewerParams.waitForInit);
viewerParams.waitForInit = setInterval(function(){
if (viewerParams.ready){
clearInterval(viewerParams.waitForInit);
sendInitGUI([],[{'makeUI':viewerParams.local}]);
viewerParams.reset = false;
}
}, 100);
}
//reset to the initial Options file
function resetToOptions(){
console.log("Resetting to Default", viewerParams.parts.options0);
viewerParams.parts.options = JSON.parse(JSON.stringify(viewerParams.parts.options0));
resetViewer();
}
//to load in a new data set
function loadNewData(){
//sendInitGUI([],[{'makeUI':!viewerParams.usingSocket}]);
//reset a few variables and remake the UI
//sendToGUI({'setGUIParamByKey':[null, "partsKeys"]})
var forGUI = [];
forGUI.push({'clearGUIinterval':null});
forGUI.push({'defineGUIParams':null});
sendToGUI(forGUI);
viewerParams.parts = null;
viewerParams.camera = null;
viewerParams.boxSize = 0;
viewerParams.controls.dispose();
makeViewer();
//if (viewerParams.local) makeUI(local=true);
forGUI = [{'makeUI':viewerParams.local}];
if (!viewerParams.local) {
forGUI.push({'setGUIParamByKey':[false,"GUIready"]});
forGUI.push({'showSplash':true});
}
d3.select('#particleUI').html("");
d3.select('.UIcontainer').html("");
d3.select("#splashdivLoader").selectAll('svg').remove();
d3.select("#splashdiv5").text("Loading...");
if (Object.keys(viewerParams.dir).length > 1){
forGUI.push({'showLoadingButton':'#selectStartupButton'});
} else {
forGUI.push({'showLoadingButton':'#loadDataButton'});
}
sendToGUI(forGUI);
d3.select("#loader").style("display","visible");
viewerParams.loadfrac = 0.;
viewerParams.haveUI = false;
showSplash(true);
viewerParams.loaded = false;
viewerParams.pauseAnimation = true;
//document.getElementById("inputFilenames").click();
}
//reset to a preset file
function resetToPreset(preset){
console.log(preset,viewerParams.parts.options0)
console.log("Resetting to Preset");
viewerParams.parts.options = preset;
resetViewer();
}
//check whether the center is locked or not
function checkCenterLock(checked){
viewerParams.controls.dispose();
viewerParams.switchControls = true;
if (checked) {
viewerParams.useTrackball = true;
} else {
viewerParams.useTrackball = false;
}
initControls();
}
//reset the camera position to whatever is saved in the options parameters
function resetCamera() {
var screenWidth = window.innerWidth;
var screenHeight = window.innerHeight;
var aspect = screenWidth / screenHeight;
viewerParams.camera = new THREE.PerspectiveCamera( viewerParams.fov, aspect, viewerParams.zmin, viewerParams.zmax);
viewerParams.camera.up.set(0, -1, 0);
viewerParams.scene.add(viewerParams.camera);
setCenter(viewerParams.parts[viewerParams.partsKeys[0]].Coordinates);
viewerParams.camera.position.set(viewerParams.center.x, viewerParams.center.y, viewerParams.center.z - viewerParams.boxSize/2.);
viewerParams.camera.lookAt(viewerParams.scene.position);
//change the center?
if (viewerParams.parts.options.hasOwnProperty('center')){
if (viewerParams.parts.options.center != null){
viewerParams.center = new THREE.Vector3(viewerParams.parts.options.center[0], viewerParams.parts.options.center[1], viewerParams.parts.options.center[2]);
setBoxSize(viewerParams.parts[viewerParams.partsKeys[0]].Coordinates);
}
}
//change location of camera?
if (viewerParams.parts.options.hasOwnProperty('camera')){
if (viewerParams.parts.options.camera != null){
viewerParams.camera.position.set(viewerParams.parts.options.camera[0], viewerParams.parts.options.camera[1], viewerParams.parts.options.camera[2]);
}
}
//change the rotation of the camera (which requires Fly controls)
if (viewerParams.parts.options.hasOwnProperty('cameraRotation')){
if (viewerParams.parts.options.cameraRotation != null){
viewerParams.camera.rotation.set(viewerParams.parts.options.cameraRotation[0], viewerParams.parts.options.cameraRotation[1], viewerParams.parts.options.cameraRotation[2]);
}
}
//change the rotation of the camera (which requires Fly controls)
if (viewerParams.parts.options.hasOwnProperty('cameraUp')){
if (viewerParams.parts.options.cameraUp != null){
viewerParams.camera.up.set(viewerParams.parts.options.cameraUp[0], viewerParams.parts.options.cameraUp[1], viewerParams.parts.options.cameraUp[2]);
}
}
viewerParams.controls.dispose();
initControls();
sendCameraInfoToGUI(null, true);
}
//reset the camera center. Can be useful when switching back and forth between trackball and fly controls
function recenterCamera() {
initControls();
sendCameraInfoToGUI(null, true);
}
//replace the current camera settings in options with the current camera position and rotation (to return here upon clicking reset)
//NOTE: with a reset, this will set the controls to fly controls
function saveCamera() {
if (viewerParams.parts.options.hasOwnProperty('camera')){
if (viewerParams.parts.options.camera == null){
viewerParams.parts.options.camera = [0,0,0];
}
} else {
viewerParams.parts.options.camera = [0,0,0];
}
viewerParams.parts.options.camera[0] = viewerParams.camera.position.x;
viewerParams.parts.options.camera[1] = viewerParams.camera.position.y;
viewerParams.parts.options.camera[2] = viewerParams.camera.position.z;
if (viewerParams.parts.options.hasOwnProperty('center')){
if (viewerParams.parts.options.center == null){
viewerParams.parts.options.center = [0,0,0];
}
} else {
viewerParams.parts.options.center = [0,0,0];
}
if (viewerParams.useTrackball){
viewerParams.parts.options.center[0] = viewerParams.controls.target.x;
viewerParams.parts.options.center[1] = viewerParams.controls.target.y;
viewerParams.parts.options.center[2] = viewerParams.controls.target.z;
}
if (viewerParams.parts.options.hasOwnProperty('cameraRotation')){
if (viewerParams.parts.options.cameraRotation != null){
viewerParams.parts.options.cameraRotation[0] = viewerParams.camera.rotation.x;
viewerParams.parts.options.cameraRotation[1] = viewerParams.camera.rotation.y;
viewerParams.parts.options.cameraRotation[2] = viewerParams.camera.rotation.z;
}
}
}
//turn on/off velocity vectors
function checkVelBox(args){
var p = args[0];
var checked = args[1];
viewerParams.showVel[p] = false;
if (checked){
viewerParams.showVel[p] = true;
}
}
//turn on/off the colormap
function checkColormapBox(args){
var p = args[0];
var checked = args[1];
var forGUI = [];
viewerParams.showColormap[p] = false;
if (checked){
viewerParams.showColormap[p] = true;
viewerParams.updateColormap[p] = true;
viewerParams.updateFilter[p] = true;
}
forGUI.push({'setGUIParamByKey':[viewerParams.showColormap, "showColormap"]})
forGUI.push({'fillColorbarContainer':p})
sendToGUI(forGUI);
console.log(p, " showColormap:", viewerParams.showColormap[p])
// redraw particle type (this seems necessary to enable the correct blending)
drawScene(pDraw = [p]);
}
//turn on/off the invert filter option
function checkInvertFilterBox(args){
var p = args[0];
var fk = args[1];
var checked = args[2];
viewerParams.invertFilter[p][fk] = false;
if (checked){
viewerParams.invertFilter[p][fk] = true;
}
viewerParams.updateFilter[p] = true;
}
//change the color of particles
function checkColor(args){
var p = args[0];
var rgb = args[1];
viewerParams.Pcolors[p] = [rgb.r/255., rgb.g/255., rgb.b/255., rgb.a];
}
//function to check which types to plot
function checkshowParts(args){
var p = args[0];
var checked = args[1];
viewerParams.updateOnOff[p] = true;
viewerParams.updateFilter[p] = true;
viewerParams.showParts[p] = false;
if (checked){
viewerParams.showParts[p] = true;
}
}
//check for stereo separation
function checkStereoLock(checked){
if (checked) {
viewerParams.normalRenderer = viewerParams.renderer;
viewerParams.renderer = viewerParams.effect;
viewerParams.useStereo = true;
} else {
viewerParams.renderer = viewerParams.normalRenderer;
viewerParams.renderer.setSize(window.innerWidth, window.innerHeight);
viewerParams.useStereo = false;
}
}
//set values based on various text box entries
function checkText(args){
var id = args[0];
var value = args[1];
var cameraPosition = new THREE.Vector3(viewerParams.camera.position.x, viewerParams.camera.position.y, viewerParams.camera.position.z);
var cameraRotation = new THREE.Vector3(viewerParams.camera.rotation.x, viewerParams.camera.rotation.y, viewerParams.camera.rotation.z);
if (id == "CenterXText") viewerParams.center.x = parseFloat(value);
if (id == "CenterYText") viewerParams.center.y = parseFloat(value);
if (id == "CenterZText") viewerParams.center.z = parseFloat(value);
if (id == "CameraXText") cameraPosition.x = parseFloat(value) - viewerParams.center.x;
if (id == "CameraYText") cameraPosition.y = parseFloat(value) - viewerParams.center.y
if (id == "CameraZText") cameraPosition.z = parseFloat(value) - viewerParams.center.z;
if (id == "RotXText") cameraRotation.x = parseFloat(value)
if (id == "RotYText") cameraRotation.y = parseFloat(value)
if (id == "RotZText") cameraRotation.z = parseFloat(value)
if (id == "RenderXText") viewerParams.renderWidth = parseInt(value);
if (id == "RenderYText") viewerParams.renderHeight = parseInt(value);
viewerParams.camera.position.set(cameraPosition.x, cameraPosition.y, cameraPosition.z);
console.log('===here camera', cameraRotation);
viewerParams.camera.rotation.set(cameraRotation.x, cameraRotation.y, cameraRotation.z);
viewerParams.controls.target = new THREE.Vector3(viewerParams.center.x, viewerParams.center.y, viewerParams.center.z);
}
//apply the options file to the UI
function applyUIoptions(){
if (viewerParams.parts){
// now check if we need to hide any of this
if (viewerParams.parts.options.hasOwnProperty('UI')){
if (!viewerParams.parts.options.UI){
d3.select('.UIcontainer').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIfullscreen')){
if (!viewerParams.parts.options.UIfullscreen){
d3.select('#fullScreenDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIsnapshot')){
if (!viewerParams.parts.options.UIsnapshot){
d3.select('#snapshotDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIreset')){
if (!viewerParams.parts.options.UIreset){
d3.select('#resetDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIsavePreset')){
if (!viewerParams.parts.options.UIsavePreset){
d3.select('#savePresetDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIloadNewData')){
if (!viewerParams.parts.options.UIloadNewData){
d3.select('#loadNewDataDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIcameraControls')){
if (!viewerParams.parts.options.UIcameraControls){
d3.select('#cameraControlsDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIdecimation')){
if (!viewerParams.parts.options.UIdecimation){
d3.select('#decimationDiv').style('display','none');
}
}
if (viewerParams.parts.options.hasOwnProperty('UIparticle')){
for (i=0; i<viewerParams.partsKeys.length; i++){
d = viewerParams.partsKeys[i];
if (viewerParams.parts.options.UIparticle.hasOwnProperty(d)){
if (!viewerParams.parts.options.UIparticle[d]){
d3.selectAll('div.'+d+'Div').style('display','none');
}
}
}
}
}
}
//save the image to a file
function saveFile(strData, filename) {
var link = document.createElement('a');
if (typeof link.download === 'string') {
document.body.appendChild(link); //Firefox requires the link to be in the body
link.download = filename;
link.href = strData;
link.click();
document.body.removeChild(link); //remove the link when done
} else {
console.log("can't save file");
return;
//location.replace(uri);
}
}
//render the image
function renderImage() {
//https://stackoverflow.com/questions/26193702/three-js-how-can-i-make-a-2d-snapshot-of-a-scene-as-a-jpg-image
//this sometimes breaks in Chrome when rendering takes too long
//best to use Firefox to render images
var imgData, imgNode;
var strDownloadMime = "image/octet-stream";
var strMime = "image/png";
var screenWidth = window.innerWidth;
var screenHeight = window.innerHeight;
var aspect = screenWidth / screenHeight;
try {
//resize
console.log('capturing image', viewerParams.renderWidth, viewerParams.renderHeight)
viewerParams.renderer.setSize(viewerParams.renderWidth, viewerParams.renderHeight);
viewerParams.camera.aspect = viewerParams.renderWidth / viewerParams.renderHeight;
viewerParams.camera.updateProjectionMatrix();
viewerParams.renderer.render( viewerParams.scene, viewerParams.camera );
//save image
imgData = viewerParams.renderer.domElement.toDataURL(strMime);
saveFile(imgData.replace(strMime, strDownloadMime), "image.png");
//back to original size
viewerParams.renderer.setSize(screenWidth, screenHeight);
viewerParams.camera.aspect = aspect;
viewerParams.camera.updateProjectionMatrix();
viewerParams.renderer.render( viewerParams.scene, viewerParams.camera );
} catch (e) {
console.log(e);
return;
}
}
function isPrimitive(test) {
return test !== Object(test);
}
function copyValue(a){
if (isPrimitive(a)) {
return a;
} else {
return JSON.parse(JSON.stringify(a));
}
}
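// e.g. copyValue(3) returns the number unchanged, while copyValue([1, 2]) returns a
// new, independent array (deep copy via a JSON round-trip), so edits to a saved
// preset never mutate the live viewerParams values.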
function createPreset(){
var preset = {};
if (viewerParams.useTrackball){
preset.center = copyValue([viewerParams.controls.target.x, viewerParams.controls.target.y, viewerParams.controls.target.z]);
} else {
var xx = new THREE.Vector3(0,0,0);
viewerParams.camera.getWorldDirection(xx);
preset.center = copyValue([xx.x + viewerParams.camera.position.x, xx.y + viewerParams.camera.position.y, xx.z + viewerParams.camera.position.z]);
}
preset.camera = copyValue([viewerParams.camera.position.x, viewerParams.camera.position.y, viewerParams.camera.position.z]);
preset.startFly = !viewerParams.useTrackball;
preset.cameraRotation = copyValue([viewerParams.camera.rotation.x, viewerParams.camera.rotation.y, viewerParams.camera.rotation.z]);
preset.cameraUp = copyValue([viewerParams.camera.up.x, viewerParams.camera.up.y, viewerParams.camera.up.z]);
preset.friction = copyValue(viewerParams.friction);
preset.stereo = copyValue(viewerParams.useStereo);
preset.stereoSep = copyValue(viewerParams.stereoSep);
preset.decimate = copyValue(viewerParams.decimate);
preset.maxVrange = copyValue(viewerParams.maxVrange);
//for the UI
preset.UI = copyValue(viewerParams.parts.options.UI);
preset.UIfullscreen = copyValue(viewerParams.parts.options.UIfullscreen);
preset.UIsnapshot = copyValue(viewerParams.parts.options.UIsnapshot);
preset.UIreset = copyValue(viewerParams.parts.options.UIreset);
preset.UIsavePreset = copyValue(viewerParams.parts.options.UIsavePreset);
preset.UIloadNewData = copyValue(viewerParams.parts.options.UIloadNewData);
preset.UIcameraControls = copyValue(viewerParams.parts.options.UIcameraControls);
preset.UIdecimation = copyValue(viewerParams.parts.options.UIdecimation);
//particle specific options
preset.showParts = {};
preset.sizeMult = {};
preset.color = {};
preset.plotNmax = {};
preset.showVel = {};
preset.velType = {};
preset.filterLims = {};
preset.filterVals = {};
preset.invertFilter = {};
preset.colormapLims = {};
preset.colormapVals = {};
preset.UIparticle = {};
preset.UIdropdown = {};
preset.UIcolorPicker = {};
preset.showColormap = {};
preset.colormap = {};
preset.colormapVariable = {};
for (var i=0; i<viewerParams.partsKeys.length; i++){
var p = copyValue(viewerParams.partsKeys[i]);
preset.showParts[p] = copyValue(viewerParams.showParts[p]);
preset.sizeMult[p] = copyValue(viewerParams.PsizeMult[p]);
preset.color[p] = copyValue(viewerParams.Pcolors[p]);
preset.plotNmax[p] = copyValue(viewerParams.plotNmax[p]);
preset.showVel[p] = copyValue(viewerParams.showVel[p]);
preset.velType[p] = copyValue(viewerParams.velType[p]);
preset.showColormap[p] = copyValue(viewerParams.showColormap[p]);
preset.colormap[p] = copyValue(viewerParams.colormap[p]);
preset.colormapVariable[p] = copyValue(viewerParams.colormapVariable[p]);
preset.UIparticle[p] = copyValue(viewerParams.parts.options.UIparticle[p]);
preset.UIdropdown[p] = copyValue(viewerParams.parts.options.UIdropdown[p]);
preset.UIcolorPicker[p] = copyValue(viewerParams.parts.options.UIcolorPicker[p]);
preset.filterLims[p] = {};
preset.filterVals[p] = {};
preset.invertFilter[p] = {};
preset.colormapLims[p] = {};
preset.colormapVals[p] = {};
for (k=0; k<viewerParams.fkeys[p].length; k++){
var fkey = copyValue(viewerParams.fkeys[p][k]);
preset.filterLims[p][fkey] = copyValue(viewerParams.filterLims[p][fkey]);
preset.filterVals[p][fkey] = copyValue(viewerParams.filterVals[p][fkey]);
preset.invertFilter[p][fkey] = copyValue(viewerParams.invertFilter[p][fkey]);
}
for (k=0; k<viewerParams.ckeys[p].length; k++){
var ckey = copyValue(viewerParams.ckeys[p][k]);
preset.colormapLims[p][ckey] = copyValue(viewerParams.colormapLims[p][ckey]);
preset.colormapVals[p][ckey] = copyValue(viewerParams.colormapVals[p][ckey]);
}
}
preset.loaded = true;
return preset;
}
function savePreset(){
var preset = createPreset();
//https://stackoverflow.com/questions/33780271/export-a-json-object-to-a-text-file
var str = JSON.stringify(preset)
//Save the file contents as a DataURI
var dataUri = 'data:application/json;charset=utf-8,'+ encodeURIComponent(str);
saveFile(dataUri,'preset.json');
}
function updateFriction(value){
if (viewerParams.useTrackball){
viewerParams.controls.dynamicDampingFactor = value;
} else {
viewerParams.controls.movementSpeed = 1. - Math.pow(value, viewerParams.flyffac);
}
viewerParams.friction = value;
}
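// Note: trackball controls use the slider value directly as the damping factor,
// while fly controls map it to a movement speed of 1 - value^flyffac, so a larger
// "friction" value always means more heavily damped / slower camera motion.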
function updateStereoSep(value){
viewerParams.stereoSep = value;
viewerParams.effect.setEyeSeparation(viewerParams.stereoSep);
}
# /Newgram-0.0.5.tar.gz/Newgram-0.0.5/newgram/types/messages_and_media/animation.py
from datetime import datetime
from typing import List
import newgram
from newgram import raw, utils
from newgram import types
from newgram.file_id import FileId, FileType, FileUniqueId, FileUniqueType
from ..object import Object
class Animation(Object):
"""An animation file (GIF or H.264/MPEG-4 AVC video without sound).
Parameters:
file_id (``str``):
Identifier for this file, which can be used to download or reuse the file.
file_unique_id (``str``):
Unique identifier for this file, which is supposed to be the same over time and for different accounts.
Can't be used to download or reuse the file.
width (``int``):
Animation width as defined by sender.
height (``int``):
Animation height as defined by sender.
duration (``int``):
Duration of the animation in seconds as defined by sender.
file_name (``str``, *optional*):
Animation file name.
mime_type (``str``, *optional*):
Mime type of a file as defined by sender.
file_size (``int``, *optional*):
File size.
date (:py:obj:`~datetime.datetime`, *optional*):
Date the animation was sent.
thumbs (List of :obj:`~newgram.types.Thumbnail`, *optional*):
Animation thumbnails.
"""
def __init__(
self,
*,
client: "newgram.Client" = None,
file_id: str,
file_unique_id: str,
width: int,
height: int,
duration: int,
file_name: str = None,
mime_type: str = None,
file_size: int = None,
date: datetime = None,
thumbs: List["types.Thumbnail"] = None
):
super().__init__(client)
self.file_id = file_id
self.file_unique_id = file_unique_id
self.file_name = file_name
self.mime_type = mime_type
self.file_size = file_size
self.date = date
self.width = width
self.height = height
self.duration = duration
self.thumbs = thumbs
@staticmethod
def _parse(
client,
animation: "raw.types.Document",
video_attributes: "raw.types.DocumentAttributeVideo",
file_name: str
) -> "Animation":
return Animation(
file_id=FileId(
file_type=FileType.ANIMATION,
dc_id=animation.dc_id,
media_id=animation.id,
access_hash=animation.access_hash,
file_reference=animation.file_reference
).encode(),
file_unique_id=FileUniqueId(
file_unique_type=FileUniqueType.DOCUMENT,
media_id=animation.id
).encode(),
width=getattr(video_attributes, "w", 0),
height=getattr(video_attributes, "h", 0),
duration=getattr(video_attributes, "duration", 0),
mime_type=animation.mime_type,
file_size=animation.size,
file_name=file_name,
date=utils.timestamp_to_datetime(animation.date),
thumbs=types.Thumbnail._parse(client, animation),
client=client
        )
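# Example (sketch): given an ``Animation`` instance ``anim`` -- e.g. one attached to
# an incoming media message -- the parsed metadata is available as plain attributes:
#
#   print(anim.file_id, anim.width, anim.height, anim.duration, anim.mime_type)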
# /Auto_FOX-1.0.0b1-py3-none-any.whl/FOX/properties/__init__.py
r"""Functions for calculating/extracting various properties.
Each function can be used to calculate the respective property as is,
or to extract it from a passed :class:`qmflows.Result <qmflows.packages.Result>` instance.
.. code-block:: python
>>> from FOX.properties import get_bulk_modulus
>>> from qmflows.packages import Result
>>> import numpy as np
>>> # Calculate the bulk modulus from a set of arrays
>>> pressure: np.ndarray = ...
>>> volume: np.ndarray = ...
>>> get_bulk_modulus(pressure, volume) # doctest: +SKIP
array([[[ 0., 1., 2.],
[ 3., 4., 5.]],
<BLANKLINE>
[[ 6., 7., 8.],
[ 9., 10., 11.]]])
>>> # Calculate the bulk modulus from a qmflows.Result instance
>>> result: Result = ...
>>> get_bulk_modulus.from_result(result) # doctest: +SKIP
array([[[ 0., 1., 2.],
[ 3., 4., 5.]],
<BLANKLINE>
[[ 6., 7., 8.],
[ 9., 10., 11.]]])
An example for how :func:`get_bulk_modulus` can be used in conjunction
with the :ref:`ARMC yaml input <monte_carlo_parameters.pes>`.
Note that additional CP2K ``print`` keys are required in order for it
to export the necessary properties.
.. code-block:: yaml
job:
type: FOX.armc.PackageManager
molecule: mol.xyz
md:
template: qmflows.md.specific.cp2k_mm
settings:
cell_parameters: [50, 50, 50]
input:
motion:
print:
cell on:
filename: ''
forces on:
filename: ''
md:
ensemble: NVE
thermostat:
print:
temperature on:
filename: ''
pes:
bulk_modulus:
func: FOX.properties.get_bulk_modulus.from_result
ref: [1.0]
kwargs:
reduce: mean
Index
-----
.. currentmodule:: FOX.properties
.. list-table::
:widths: 50 50
:header-rows: 0
* - :func:`get_pressure <get_pressure>` (forces, coords, volume[...])
- Calculate the pressure from the passed **forces**.
* - :func:`get_bulk_modulus <get_bulk_modulus>` (pressure, volume[...])
- Calculate the bulk modulus via differentiation of **pressure** w.r.t. **volume**.
* - :func:`get_attr <get_attr>` (obj, name[, default, reduce, axis])
- :func:`getattr` with support for additional keyword argument.
* - :func:`call_method <call_method>` (obj, name, \*args[, reduce, axis])
- Call the **name** method of **obj**.
* - :class:`FromResult` (func[, result_func])
- A class for wrapping :data:`~types.FunctionType` objects.
API
---
.. autofunction:: get_pressure
.. autofunction:: get_bulk_modulus
.. autofunction:: get_attr
.. autofunction:: call_method
.. autoclass:: FromResult(func, name, module=None, doc=None)
.. data:: REDUCTION_NAMES
A mapping that maps :meth:`from_result` aliases to callbacks.
    In addition to the examples below, all reducible ufuncs
from :ref:`numpy <numpy:ufuncs>` and :mod:`scipy.special` are available.
:type: :class:`types.MappingProxyType[str, Callable[[np.ndarray], np.float64]] <types.MappingProxyType>`
"""
# flake8: noqa: E402
from .base import FromResult, get_attr, call_method
from .pressure import get_pressure
from .bulk_modulus import get_bulk_modulus
__all__ = ['FromResult', 'get_attr', 'call_method', 'get_pressure', 'get_bulk_modulus']
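# Example (sketch): the generic helpers follow the signatures listed in the module
# docstring; ``some_result`` and the ``"volume"`` attribute name are illustrative
# placeholders, and passing ``reduce="mean"`` assumes the aliases documented in
# REDUCTION_NAMES above.
#
#   from FOX.properties import get_attr
#   mean_volume = get_attr(some_result, "volume", reduce="mean")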
# /KratosStructuralMechanicsApplication-9.4-cp310-cp310-win_amd64.whl/KratosMultiphysics/StructuralMechanicsApplication/structural_mechanics_implicit_dynamic_solver.py
import KratosMultiphysics
# Import applications
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
# Import base class file
from KratosMultiphysics.StructuralMechanicsApplication.structural_mechanics_solver import MechanicalSolver
from KratosMultiphysics.StructuralMechanicsApplication import auxiliary_methods_solvers
def CreateSolver(model, custom_settings):
return ImplicitMechanicalSolver(model, custom_settings)
class ImplicitMechanicalSolver(MechanicalSolver):
"""The structural mechanics implicit dynamic solver.
This class creates the mechanical solvers for implicit dynamic analysis.
It currently supports Newmark, Bossak and dynamic relaxation schemes.
See structural_mechanics_solver.py for more information.
"""
def __init__(self, model, custom_settings):
# Construct the base solver.
super().__init__(model, custom_settings)
KratosMultiphysics.Logger.PrintInfo("::[ImplicitMechanicalSolver]:: ", "Construction finished")
@classmethod
def GetDefaultParameters(cls):
this_defaults = KratosMultiphysics.Parameters("""{
"time_integration_method" : "implicit",
"scheme_type" : "bossak",
"damp_factor_m" :-0.3,
"newmark_beta" : 0.25,
"rayleigh_alpha" : 0.0,
"rayleigh_beta" : 0.0
}""")
this_defaults.AddMissingParameters(super().GetDefaultParameters())
return this_defaults
def GetMinimumBufferSize(self):
base_min_buffer_size = super().GetMinimumBufferSize()
scheme_type = self.settings["scheme_type"].GetString()
if "bdf" in scheme_type or scheme_type == "backward_euler":
return max(base_min_buffer_size, auxiliary_methods_solvers.GetBDFIntegrationOrder(scheme_type)+1)
else:
return base_min_buffer_size
def AddVariables(self):
super().AddVariables()
self._add_dynamic_variables()
KratosMultiphysics.Logger.PrintInfo("::[ImplicitMechanicalSolver]:: ", "Variables ADDED")
def AddDofs(self):
super().AddDofs()
self._add_dynamic_dofs()
KratosMultiphysics.Logger.PrintInfo("::[ImplicitMechanicalSolver]:: ", "DOF's ADDED")
def InitializeSolutionStep(self):
# Using the base InitializeSolutionStep
super().InitializeSolutionStep()
# Some pre-processes may affect the system of equations, we rebuild the equation ids
process_info = self.main_model_part.ProcessInfo
if process_info[KratosMultiphysics.STEP] == 1 and process_info[StructuralMechanicsApplication.RESET_EQUATION_IDS]:
# Resetting the global equations ids
self._GetBuilderAndSolver().SetUpSystem(self.GetComputingModelPart())
#### Private functions ####
def _CreateScheme(self):
scheme_type = self.settings["scheme_type"].GetString()
# Setting the Rayleigh damping parameters
process_info = self.main_model_part.ProcessInfo
process_info[StructuralMechanicsApplication.RAYLEIGH_ALPHA] = self.settings["rayleigh_alpha"].GetDouble()
process_info[StructuralMechanicsApplication.RAYLEIGH_BETA] = self.settings["rayleigh_beta"].GetDouble()
# Setting the time integration schemes
if(scheme_type == "newmark"):
damp_factor_m = 0.0
newmark_beta = self.settings["newmark_beta"].GetDouble()
mechanical_scheme = KratosMultiphysics.ResidualBasedBossakDisplacementScheme(damp_factor_m, newmark_beta)
elif(scheme_type == "bossak"):
damp_factor_m = self.settings["damp_factor_m"].GetDouble()
newmark_beta = self.settings["newmark_beta"].GetDouble()
mechanical_scheme = KratosMultiphysics.ResidualBasedBossakDisplacementScheme(damp_factor_m, newmark_beta)
elif(scheme_type == "pseudo_static"):
mechanical_scheme = KratosMultiphysics.ResidualBasedPseudoStaticDisplacementScheme(StructuralMechanicsApplication.RAYLEIGH_BETA)
elif(scheme_type.startswith("bdf") or scheme_type == "backward_euler"):
order = auxiliary_methods_solvers.GetBDFIntegrationOrder(scheme_type)
# In case of rotation dof we declare the dynamic variables
if self.settings["rotation_dofs"].GetBool():
bdf_parameters = KratosMultiphysics.Parameters(""" {
"domain_size" : 3,
"integration_order" : 2,
"solution_variables" : ["DISPLACEMENT","ROTATION"]
} """)
bdf_parameters["domain_size"].SetInt(process_info[KratosMultiphysics.DOMAIN_SIZE])
mechanical_scheme = KratosMultiphysics.ResidualBasedBDFCustomScheme(order, bdf_parameters)
else:
mechanical_scheme = KratosMultiphysics.ResidualBasedBDFDisplacementScheme(order)
elif(scheme_type == "relaxation"):
damp_factor_f =-0.3
dynamic_factor_m = 10.0
mechanical_scheme = StructuralMechanicsApplication.ResidualBasedRelaxationScheme(damp_factor_f, dynamic_factor_m)
else:
err_msg = "The requested scheme type \"" + scheme_type + "\" is not available!\n"
err_msg += "Available options are: \"newmark\", \"bossak\", \"pseudo_static\", \"backward_euler\", \"bdf1\", \"bdf2\", \"bdf3\", \"bdf4\", \"bdf5\", \"relaxation\""
raise Exception(err_msg)
        return mechanical_scheme
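# Example (sketch): constructing the solver with custom dynamic settings; only keys
# declared in GetDefaultParameters() are shown, the remaining entries are filled in
# from the base MechanicalSolver defaults.
#
#   custom_settings = KratosMultiphysics.Parameters("""{
#       "time_integration_method" : "implicit",
#       "scheme_type"             : "newmark",
#       "newmark_beta"            : 0.25,
#       "rayleigh_alpha"          : 0.0,
#       "rayleigh_beta"           : 0.01
#   }""")
#   solver = CreateSolver(model, custom_settings)   # model: an existing KratosMultiphysics.Model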
# /HR_Neural_Networks-1.0.6.tar.gz/HR_Neural_Networks/Paper_experiments/Section_6.2/Rice_TRADES/Rice_TRADES.py
import pandas as pd
# %%
import sys
sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from trades import trades_loss
# %%
import os
# %%
from pathlib import Path
# %%
import argparse
import logging
import sys
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from preactresnet import PreActResNet18
from utils import *
device = 'cuda' if torch.cuda.is_available() else 'cpu'
mu = torch.tensor(cifar10_mean).view(3,1,1).to(device)
std = torch.tensor(cifar10_std).view(3,1,1).to(device)
def normalize(X):
return (X - mu)/std
upper_limit, lower_limit = 1,0
torch.backends.cudnn.benchmark = True # Volta: 100, much faster than the original implementation
torch.backends.cudnn.enabled = True
def clamp(X, lower_limit, upper_limit):
return torch.max(torch.min(X, upper_limit), lower_limit)
class Batches():
def __init__(self, dataset, batch_size, shuffle, set_random_choices=False, num_workers=0, drop_last=False):
self.dataset = dataset
self.batch_size = batch_size
self.set_random_choices = set_random_choices
self.dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, num_workers=num_workers, pin_memory=True, shuffle=shuffle, drop_last=drop_last
)
def __iter__(self):
if self.set_random_choices:
self.dataset.set_random_choices()
return ({'input': x.to(device).float(), 'target': y.to(device).long()} for (x,y) in self.dataloader)
def __len__(self):
return len(self.dataloader)
def mixup_data(x, y, alpha=1.0):
'''Returns mixed inputs, pairs of targets, and lambda'''
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
index = torch.randperm(batch_size).to(device)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def attack_pgd(model, X, y, epsilon, alpha, attack_iters, restarts,
norm, early_stop=False,
mixup=False, y_a=None, y_b=None, lam=None):
max_loss = torch.zeros(y.shape[0]).to(device)
max_delta = torch.zeros_like(X).to(device)
for _ in range(restarts):
delta = torch.zeros_like(X).to(device)
if norm == "l_inf":
delta.uniform_(-epsilon, epsilon)
elif norm == "l_2":
delta.normal_()
d_flat = delta.view(delta.size(0),-1)
n = d_flat.norm(p=2,dim=1).view(delta.size(0),1,1,1)
r = torch.zeros_like(n).uniform_(0, 1)
delta *= r/n*epsilon
else:
raise ValueError
delta = clamp(delta, lower_limit-X, upper_limit-X)
delta.requires_grad = True
for _ in range(attack_iters):
output = model(normalize(X + delta))
if early_stop:
index = torch.where(output.max(1)[1] == y)[0]
else:
index = slice(None,None,None)
if not isinstance(index, slice) and len(index) == 0:
break
if mixup:
criterion = nn.CrossEntropyLoss()
loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
else:
loss = F.cross_entropy(output, y)
loss.backward()
grad = delta.grad.detach()
d = delta[index, :, :, :]
g = grad[index, :, :, :]
x = X[index, :, :, :]
if norm == "l_inf":
d = torch.clamp(d + alpha * torch.sign(g), min=-epsilon, max=epsilon)
elif norm == "l_2":
g_norm = torch.norm(g.view(g.shape[0],-1),dim=1).view(-1,1,1,1)
scaled_g = g/(g_norm + 1e-10)
d = (d + scaled_g*alpha).view(d.size(0),-1).renorm(p=2,dim=0,maxnorm=epsilon).view_as(d)
d = clamp(d, lower_limit - x, upper_limit - x)
delta.data[index, :, :, :] = d
delta.grad.zero_()
if mixup:
criterion = nn.CrossEntropyLoss(reduction='none')
all_loss = mixup_criterion(criterion, model(normalize(X+delta)), y_a, y_b, lam)
else:
all_loss = F.cross_entropy(model(normalize(X+delta)), y, reduction='none')
max_delta[all_loss >= max_loss] = delta.detach()[all_loss >= max_loss]
max_loss = torch.max(max_loss, all_loss)
return max_delta
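# attack_pgd returns, for every sample in the batch, the perturbation within the
# epsilon ball (l_inf or l_2) that achieved the highest cross-entropy loss across
# all random restarts; it is used below for adversarial (robust) evaluation.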
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--alpha', default=0, type = float)
parser.add_argument('--r', default=0, type = float)
parser.add_argument('--model', default='PreActResNet18')
parser.add_argument('--trades_beta', default=6)
parser.add_argument('--l2', default=0, type=float)
parser.add_argument('--l1', default=0, type=float)
parser.add_argument('--batch-size', default=128, type=int)
parser.add_argument('--data-dir', default='../Files', type=str)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--lr-schedule', default='piecewise', choices=['superconverge', 'piecewise', 'linear', 'piecewisesmoothed', 'piecewisezoom', 'piecewise_fixed', 'onedrop', 'multipledecay', 'cosine'])
parser.add_argument('--lr-max', default=0.1, type=float)
parser.add_argument('--lr-one-drop', default=0.01, type=float)
parser.add_argument('--lr-drop-epoch', default=100, type=int)
parser.add_argument('--attack', default='pgd', type=str, choices=['pgd', 'fgsm', 'free', 'none'])
parser.add_argument('--epsilon', default=8, type=int)
parser.add_argument('--attack-iters', default=10, type=int)
parser.add_argument('--restarts', default=1, type=int)
parser.add_argument('--pgd-alpha', default=2, type=float)
parser.add_argument('--fgsm-alpha', default=1.25, type=float)
parser.add_argument('--norm', default='l_inf', type=str, choices=['l_inf', 'l_2'])
parser.add_argument('--fgsm-init', default='random', choices=['zero', 'random', 'previous'])
parser.add_argument('--fname', default='cifar_model', type=str)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--half', action='store_true')
parser.add_argument('--width-factor', default=10, type=int)
parser.add_argument('--resume', default=0, type=int)
parser.add_argument('--cutout', action='store_true')
parser.add_argument('--cutout-len', type=int)
parser.add_argument('--mixup', action='store_true')
parser.add_argument('--mixup-alpha', type=float)
parser.add_argument('--eval', action='store_true')
parser.add_argument('--val', action='store_true')
parser.add_argument('--chkpt-iters', default=10, type=int)
return parser.parse_args('')
def main():
train_losses = []
trades_losses = []
nat_test_losses = []
adv_test_losses = []
train_accuracies = []
nat_test_accuracies = []
adv_test_accuracies = []
args = get_args()
if not os.path.exists(args.fname):
os.makedirs(args.fname)
logger = logging.getLogger(__name__)
logging.basicConfig(
format='[%(asctime)s] - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG,
handlers=[
logging.FileHandler(os.path.join(args.fname, 'eval.log' if args.eval else 'output.log')),
logging.StreamHandler()
])
model_name = f"r = {args.trades_beta}, eps = {args.epsilon/255}"
logger.info(args)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
transforms = [Crop(32, 32), FlipLR()]
if args.cutout:
transforms.append(Cutout(args.cutout_len, args.cutout_len))
if args.val:
try:
dataset = torch.load("cifar10_validation_split.pth")
except:
print("Couldn't find a dataset with a validation split, did you run "
"generate_validation.py?")
return
val_set = list(zip(transpose(dataset['val']['data']/255.), dataset['val']['labels']))
val_batches = Batches(val_set, args.batch_size, shuffle=False, num_workers=2)
else:
dataset = cifar10(args.data_dir)
train_set = list(zip(transpose(pad(dataset['train']['data'], 4)/255.),
dataset['train']['labels']))
train_set_x = Transform(train_set, transforms)
train_batches = Batches(train_set_x, args.batch_size, shuffle=True, set_random_choices=True, num_workers=2)
test_set = list(zip(transpose(dataset['test']['data']/255.), dataset['test']['labels']))
test_batches = Batches(test_set, args.batch_size, shuffle=False, num_workers=2)
epsilon = (args.epsilon / 255.)
pgd_alpha = (args.pgd_alpha / 255.)
if args.model == 'PreActResNet18':
model = PreActResNet18()
elif args.model == 'WideResNet':
raise ValueError("We do not use WideResNet in our experiments")
# model = WideResNet(34, 10, widen_factor=args.width_factor, dropRate=0.0)
else:
raise ValueError("Unknown model")
model = nn.DataParallel(model).to(device)
model.train()
if args.l2:
decay, no_decay = [], []
for name,param in model.named_parameters():
if 'bn' not in name and 'bias' not in name:
decay.append(param)
else:
no_decay.append(param)
params = [{'params':decay, 'weight_decay':args.l2},
{'params':no_decay, 'weight_decay': 0 }]
else:
params = model.parameters()
opt = torch.optim.SGD(params, lr=args.lr_max, momentum=0.9, weight_decay=5e-4)
criterion = nn.CrossEntropyLoss(reduction = 'none')
if args.attack == 'free':
delta = torch.zeros(args.batch_size, 3, 32, 32).to(device)
delta.requires_grad = True
elif args.attack == 'fgsm' and args.fgsm_init == 'previous':
delta = torch.zeros(args.batch_size, 3, 32, 32).to(device)
delta.requires_grad = True
if args.attack == 'free':
epochs = int(math.ceil(args.epochs / args.attack_iters))
else:
epochs = args.epochs
if args.lr_schedule == 'superconverge':
lr_schedule = lambda t: np.interp([t], [0, args.epochs * 2 // 5, args.epochs], [0, args.lr_max, 0])[0]
elif args.lr_schedule == 'piecewise':
def lr_schedule(t):
if t / args.epochs < 0.5:
return args.lr_max
elif t / args.epochs < 0.75:
return args.lr_max / 10.
else:
return args.lr_max / 100.
elif args.lr_schedule == 'piecewise_fixed':
def lr_schedule(t):
if t < 100:
return args.lr_max
elif t < 150:
return args.lr_max / 10.
elif t < 200:
return args.lr_max / 100.
elif t < 250:
return args.lr_max / 200.
elif t < 300:
return args.lr_max / 400.
elif t < 350:
return args.lr_max / 800.
else:
return args.lr_max / 1600.
elif args.lr_schedule == 'linear':
lr_schedule = lambda t: np.interp([t], [0, args.epochs // 3, args.epochs * 2 // 3, args.epochs], [args.lr_max, args.lr_max, args.lr_max / 10, args.lr_max / 100])[0]
elif args.lr_schedule == 'onedrop':
def lr_schedule(t):
if t < args.lr_drop_epoch:
return args.lr_max
else:
return args.lr_one_drop
elif args.lr_schedule == 'multipledecay':
def lr_schedule(t):
return args.lr_max - (t//(args.epochs//10))*(args.lr_max/10)
elif args.lr_schedule == 'cosine':
def lr_schedule(t):
return args.lr_max * 0.5 * (1 + np.cos(t / args.epochs * np.pi))
best_test_robust_acc = 0
best_val_robust_acc = 0
if args.resume:
start_epoch = args.resume
model.load_state_dict(torch.load(os.path.join(args.fname, f'model_{start_epoch-1}.pth')))
opt.load_state_dict(torch.load(os.path.join(args.fname, f'opt_{start_epoch-1}.pth')))
logger.info(f'Resuming at epoch {start_epoch}')
best_test_robust_acc = torch.load(os.path.join(args.fname, f'model_best.pth'))['test_robust_acc']
if args.val:
best_val_robust_acc = torch.load(os.path.join(args.fname, f'model_val.pth'))['val_robust_acc']
else:
start_epoch = 0
if args.eval:
if not args.resume:
logger.info("No model loaded to evaluate, specify with --resume FNAME")
return
logger.info("[Evaluation mode]")
logger.info('Epoch \t Train Time \t Test Time \t LR \t \t Train Loss \t Train Acc \t Train Robust Loss \t Train Robust Acc \t Test Loss \t Test Acc \t Test Robust Loss \t Test Robust Acc')
for epoch in range(start_epoch, epochs):
model.train()
start_time = time.time()
train_loss = 0
train_acc = 0
train_robust_loss = 0
train_robust_acc = 0
train_n = 0
counter = 0
for i, batch in enumerate(train_batches):
torch.cuda.empty_cache()
if args.eval:
break
X, y = batch['input'], batch['target']
if args.mixup:
X, y_a, y_b, lam = mixup_data(X, y, args.mixup_alpha)
X, y_a, y_b = map(Variable, (X, y_a, y_b))
lr = lr_schedule(epoch + (i + 1) / len(train_batches))
opt.param_groups[0].update(lr=lr)
            opt.zero_grad()
            robust_loss, x_adv = trades_loss(model=model,
                                             x_natural=X,
                                             y=y,
                                             optimizer=opt,
                                             step_size=pgd_alpha,
                                             epsilon=epsilon,
                                             perturb_steps=args.attack_iters,
                                             beta=args.trades_beta,
                                             distance='l_inf',
                                             device=device)
            # Apply the optional L1 penalty on top of the TRADES loss; robust_loss must
            # exist before it can be accumulated, so this has to follow trades_loss().
            if args.l1:
                for name, param in model.named_parameters():
                    if 'bn' not in name and 'bias' not in name:
                        robust_loss += args.l1 * param.abs().sum()
            robust_loss.backward()
            opt.step()
output = model(normalize(X))
robust_output = model(x_adv)
if args.mixup:
robust_loss = mixup_criterion(criterion, robust_output, y_a, y_b, lam)
else:
robust_loss = criterion(robust_output, y)
if args.mixup:
loss = mixup_criterion(criterion, output, y_a, y_b, lam)
else:
loss = criterion(output, y)
train_robust_loss += torch.mean(robust_loss).item() * y.size(0)
train_robust_acc += (robust_output.max(1)[1] == y).sum().item()
train_loss += torch.mean(loss).item() * y.size(0)
train_acc += (output.max(1)[1] == y).sum().item()
train_n += y.size(0)
robust_loss.cpu().detach()
trades_losses.append(train_robust_loss/train_n)
train_losses.append(train_loss/train_n)
train_accuracies.append(train_acc/train_n)
train_time = time.time()
model.eval()
test_loss = 0
test_acc = 0
test_robust_loss = 0
test_robust_acc = 0
test_n = 0
for i, batch in enumerate(test_batches):
X, y = batch['input'], batch['target']
# Random initialization
if args.attack == 'none':
delta = torch.zeros_like(X)
else:
delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval)
delta = delta.detach()
robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
robust_loss = criterion(robust_output, y)
output = model(normalize(X))
loss = criterion(output, y)
test_robust_loss += torch.mean(robust_loss).item() * y.size(0)
test_robust_acc += (robust_output.max(1)[1] == y).sum().item()
test_loss += torch.mean(loss).item() * y.size(0)
test_acc += (output.max(1)[1] == y).sum().item()
test_n += y.size(0)
nat_test_losses.append(test_loss/test_n)
adv_test_losses.append(test_robust_loss/test_n)
nat_test_accuracies.append(test_acc/test_n)
adv_test_accuracies.append(test_robust_acc/test_n)
losses = pd.DataFrame([train_losses, trades_losses,
nat_test_losses, adv_test_losses,
train_accuracies,
nat_test_accuracies, adv_test_accuracies]).T
losses.columns = ["Training Loss", "Inflated Loss",
"Natural Testing Loss", "Adversarial Testing Loss",
"Training Accuracy",
"Natural Testing Accuracy", "Adversarial Testing Accuracy"]
results_path = f"Rice_TRADES_experiments_results_{args.model}/{model_name}/"
Path(results_path).mkdir(parents=True, exist_ok=True)
losses.to_csv(results_path + "metrics.csv")
test_time = time.time()
if args.val:
val_loss = 0
val_acc = 0
val_robust_loss = 0
val_robust_acc = 0
val_n = 0
for i, batch in enumerate(val_batches):
X, y = batch['input'], batch['target']
# Random initialization
if args.attack == 'none':
delta = torch.zeros_like(X)
else:
delta = attack_pgd(model, X, y, epsilon, pgd_alpha, args.attack_iters, args.restarts, args.norm, early_stop=args.eval)
delta = delta.detach()
robust_output = model(normalize(torch.clamp(X + delta[:X.size(0)], min=lower_limit, max=upper_limit)))
robust_loss = criterion(robust_output, y)
output = model(normalize(X))
loss = criterion(output, y)
                val_robust_loss += torch.mean(robust_loss).item() * y.size(0)
                val_robust_acc += (robust_output.max(1)[1] == y).sum().item()
                val_loss += torch.mean(loss).item() * y.size(0)
                val_acc += (output.max(1)[1] == y).sum().item()
val_n += y.size(0)
if not args.eval:
logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
epoch, train_time - start_time, test_time - train_time, lr,
train_loss/train_n, train_acc/train_n, train_robust_loss/train_n, train_robust_acc/train_n,
test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
if args.val:
logger.info('validation %.4f \t %.4f \t %.4f \t %.4f',
val_loss/val_n, val_acc/val_n, val_robust_loss/val_n, val_robust_acc/val_n)
if val_robust_acc/val_n > best_val_robust_acc:
torch.save({
'state_dict':model.state_dict(),
'test_robust_acc':test_robust_acc/test_n,
'test_robust_loss':test_robust_loss/test_n,
'test_loss':test_loss/test_n,
'test_acc':test_acc/test_n,
'val_robust_acc':val_robust_acc/val_n,
'val_robust_loss':val_robust_loss/val_n,
'val_loss':val_loss/val_n,
'val_acc':val_acc/val_n,
}, os.path.join(args.fname, f'model_val.pth'))
best_val_robust_acc = val_robust_acc/val_n
# save checkpoint
if (epoch+1) % args.chkpt_iters == 0 or epoch+1 == epochs:
torch.save(model.state_dict(), os.path.join(args.fname, f'model_{epoch}.pth'))
torch.save(opt.state_dict(), os.path.join(args.fname, f'opt_{epoch}.pth'))
# save best
if test_robust_acc/test_n > best_test_robust_acc:
torch.save({
'state_dict':model.state_dict(),
'test_robust_acc':test_robust_acc/test_n,
'test_robust_loss':test_robust_loss/test_n,
'test_loss':test_loss/test_n,
'test_acc':test_acc/test_n,
}, os.path.join(args.fname, f'model_best.pth'))
best_test_robust_acc = test_robust_acc/test_n
else:
logger.info('%d \t %.1f \t \t %.1f \t \t %.4f \t %.4f \t %.4f \t %.4f \t \t %.4f \t \t %.4f \t %.4f \t %.4f \t \t %.4f',
epoch, train_time - start_time, test_time - train_time, -1,
-1, -1, -1, -1,
test_loss/test_n, test_acc/test_n, test_robust_loss/test_n, test_robust_acc/test_n)
return
if __name__ == "__main__":
main()
# %%
/AutoTrader_Web_API_Stocks_Developer-1.3.5-py3-none-any.whl/com/dakshata/trading/model/platform/PlatformOrder.py |
from com.dakshata.trading.model.portfolio.CoreOrder import CoreOrder
class PlatformOrder(CoreOrder):
def __init__(self, id, tradeType, orderType, productType, \
variety, validity, quantity, disclosedQuantity, \
price, triggerPrice, amo, statusMessage, publisherId, \
pseudoAccount, tradingAccount, stockBroker, exchange, symbol, \
independentExchange, independentSymbol, \
modifiedTime, createdTime, \
parentOrderId, exchangeOrderId, averagePrice, \
clientId, rawStatus, platformTime, exchangeTime, \
pendingQuantity, filledQuantity, platform, \
status, nestRequestId, *args, **kwargs):
super().__init__(id, tradeType, orderType, productType, \
variety, validity, quantity, disclosedQuantity, \
price, triggerPrice, amo, statusMessage, publisherId, \
pseudoAccount, tradingAccount, stockBroker, exchange, symbol, \
independentExchange, independentSymbol, \
modifiedTime, createdTime)
self.parent_order_id = parentOrderId
self.exchange_order_id = exchangeOrderId
self.average_price = averagePrice
self.client_id = clientId
self.raw_status = rawStatus
self.platform_time = platformTime
self.exchange_time = exchangeTime
self.pending_quantity = pendingQuantity
self.filled_quantity = filledQuantity
self.platform = platform
self.status = status
self.nest_request_id = nestRequestId
    def is_open(self):
        return self.status and self.status.upper() == 'OPEN'
    def is_trigger_pending(self):
        return self.status and self.status.upper() == 'TRIGGER_PENDING'
    def is_open_or_trigger_pending(self):
        return self.is_open() or self.is_trigger_pending()
    def is_cancelled(self):
        return self.status and self.status.upper() == 'CANCELLED'
    def is_rejected(self):
        return self.status and self.status.upper() == 'REJECTED'
def __str__(self):
        return super().__str__()
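# Illustrative usage sketch (not part of this module): how the status helpers above might
# be used by client code. The payload dictionary is hypothetical and assumed to carry the
# keyword arguments expected by __init__.
#
#     order = PlatformOrder(**payload)
#     if order.is_open_or_trigger_pending():
#         print("Order %s is still live (%s)" % (order.exchange_order_id, order.status))
#     elif order.is_rejected():
#         print("Order rejected by broker: %s" % order.raw_status)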
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/auth/hashers.py |
import base64
import binascii
import functools
import hashlib
import importlib
import math
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.crypto import (
RANDOM_STRING_CHARS,
constant_time_compare,
get_random_string,
md5,
pbkdf2,
)
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning
from django.utils.module_loading import import_string
from django.utils.translation import gettext_noop as _
UNUSABLE_PASSWORD_PREFIX = "!" # This will never be a valid encoded hash
UNUSABLE_PASSWORD_SUFFIX_LENGTH = (
40 # number of random chars to add after UNUSABLE_PASSWORD_PREFIX
)
def is_password_usable(encoded):
"""
Return True if this password wasn't generated by
User.set_unusable_password(), i.e. make_password(None).
"""
return encoded is None or not encoded.startswith(UNUSABLE_PASSWORD_PREFIX)
def check_password(password, encoded, setter=None, preferred="default"):
"""
Return a boolean of whether the raw password matches the three
part encoded digest.
If setter is specified, it'll be called when you need to
regenerate the password.
"""
if password is None or not is_password_usable(encoded):
return False
preferred = get_hasher(preferred)
try:
hasher = identify_hasher(encoded)
except ValueError:
# encoded is gibberish or uses a hasher that's no longer installed.
return False
hasher_changed = hasher.algorithm != preferred.algorithm
must_update = hasher_changed or preferred.must_update(encoded)
is_correct = hasher.verify(password, encoded)
# If the hasher didn't change (we don't protect against enumeration if it
# does) and the password should get updated, try to close the timing gap
# between the work factor of the current encoded password and the default
# work factor.
if not is_correct and not hasher_changed and must_update:
hasher.harden_runtime(password, encoded)
if setter and is_correct and must_update:
setter(password)
return is_correct
def make_password(password, salt=None, hasher="default"):
"""
Turn a plain-text password into a hash for database storage
Same as encode() but generate a new random salt. If password is None then
return a concatenation of UNUSABLE_PASSWORD_PREFIX and a random string,
which disallows logins. Additional random string reduces chances of gaining
access to staff or superuser accounts. See ticket #20079 for more info.
"""
if password is None:
return UNUSABLE_PASSWORD_PREFIX + get_random_string(
UNUSABLE_PASSWORD_SUFFIX_LENGTH
)
if not isinstance(password, (bytes, str)):
raise TypeError(
"Password must be a string or bytes, got %s." % type(password).__qualname__
)
hasher = get_hasher(hasher)
salt = salt or hasher.salt()
return hasher.encode(password, salt)
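# Illustrative example (not part of Django): round-tripping a password through
# make_password() and check_password(). With the default settings the encoded value is
# produced by PBKDF2PasswordHasher, i.e. "pbkdf2_sha256$<iterations>$<salt>$<hash>".
#
#     encoded = make_password("s3cret")      # hashed with the default hasher
#     check_password("s3cret", encoded)      # -> True
#     check_password("wrong", encoded)       # -> False
#     make_password(None)                    # -> unusable password starting with "!"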
@functools.lru_cache
def get_hashers():
hashers = []
for hasher_path in settings.PASSWORD_HASHERS:
hasher_cls = import_string(hasher_path)
hasher = hasher_cls()
if not getattr(hasher, "algorithm"):
raise ImproperlyConfigured(
"hasher doesn't specify an algorithm name: %s" % hasher_path
)
hashers.append(hasher)
return hashers
@functools.lru_cache
def get_hashers_by_algorithm():
return {hasher.algorithm: hasher for hasher in get_hashers()}
@receiver(setting_changed)
def reset_hashers(*, setting, **kwargs):
if setting == "PASSWORD_HASHERS":
get_hashers.cache_clear()
get_hashers_by_algorithm.cache_clear()
def get_hasher(algorithm="default"):
"""
Return an instance of a loaded password hasher.
If algorithm is 'default', return the default hasher. Lazily import hashers
specified in the project's settings file if needed.
"""
if hasattr(algorithm, "algorithm"):
return algorithm
elif algorithm == "default":
return get_hashers()[0]
else:
hashers = get_hashers_by_algorithm()
try:
return hashers[algorithm]
except KeyError:
raise ValueError(
"Unknown password hashing algorithm '%s'. "
"Did you specify it in the PASSWORD_HASHERS "
"setting?" % algorithm
)
def identify_hasher(encoded):
"""
Return an instance of a loaded password hasher.
Identify hasher algorithm by examining encoded hash, and call
get_hasher() to return hasher. Raise ValueError if
algorithm cannot be identified, or if hasher is not loaded.
"""
# Ancient versions of Django created plain MD5 passwords and accepted
# MD5 passwords with an empty salt.
if (len(encoded) == 32 and "$" not in encoded) or (
len(encoded) == 37 and encoded.startswith("md5$$")
):
algorithm = "unsalted_md5"
# Ancient versions of Django accepted SHA1 passwords with an empty salt.
elif len(encoded) == 46 and encoded.startswith("sha1$$"):
algorithm = "unsalted_sha1"
else:
algorithm = encoded.split("$", 1)[0]
return get_hasher(algorithm)
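# Illustrative note (not part of Django): identify_hasher() keys off the algorithm prefix
# of the encoded string, e.g. "pbkdf2_sha256$..." resolves to PBKDF2PasswordHasher, while
# a bare 32-character hex digest or an "md5$$..." value is treated as legacy "unsalted_md5".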
def mask_hash(hash, show=6, char="*"):
"""
Return the given hash, with only the first ``show`` number shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked
def must_update_salt(salt, expected_entropy):
# Each character in the salt provides log_2(len(alphabet)) bits of entropy.
return len(salt) * math.log2(len(RANDOM_STRING_CHARS)) < expected_entropy
class BasePasswordHasher:
"""
Abstract base class for password hashers
When creating your own hasher, you need to override algorithm,
verify(), encode() and safe_summary().
PasswordHasher objects are immutable.
"""
algorithm = None
library = None
salt_entropy = 128
def _load_library(self):
if self.library is not None:
if isinstance(self.library, (tuple, list)):
name, mod_path = self.library
else:
mod_path = self.library
try:
module = importlib.import_module(mod_path)
except ImportError as e:
raise ValueError(
"Couldn't load %r algorithm library: %s"
% (self.__class__.__name__, e)
)
return module
raise ValueError(
"Hasher %r doesn't specify a library attribute" % self.__class__.__name__
)
def salt(self):
"""
Generate a cryptographically secure nonce salt in ASCII with an entropy
of at least `salt_entropy` bits.
"""
# Each character in the salt provides
# log_2(len(alphabet)) bits of entropy.
char_count = math.ceil(self.salt_entropy / math.log2(len(RANDOM_STRING_CHARS)))
return get_random_string(char_count, allowed_chars=RANDOM_STRING_CHARS)
def verify(self, password, encoded):
"""Check if the given password is correct."""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide a verify() method"
)
def _check_encode_args(self, password, salt):
if password is None:
raise TypeError("password must be provided.")
if not salt or "$" in salt:
raise ValueError("salt must be provided and cannot contain $.")
def encode(self, password, salt):
"""
Create an encoded database value.
The result is normally formatted as "algorithm$salt$hash" and
must be fewer than 128 characters.
"""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide an encode() method"
)
def decode(self, encoded):
"""
Return a decoded database value.
The result is a dictionary and should contain `algorithm`, `hash`, and
`salt`. Extra keys can be algorithm specific like `iterations` or
`work_factor`.
"""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide a decode() method."
)
def safe_summary(self, encoded):
"""
Return a summary of safe values.
The result is a dictionary and will be used where the password field
must be displayed to construct a safe representation of the password.
"""
raise NotImplementedError(
"subclasses of BasePasswordHasher must provide a safe_summary() method"
)
def must_update(self, encoded):
return False
def harden_runtime(self, password, encoded):
"""
Bridge the runtime gap between the work factor supplied in `encoded`
and the work factor suggested by this hasher.
Taking PBKDF2 as an example, if `encoded` contains 20000 iterations and
`self.iterations` is 30000, this method should run password through
another 10000 iterations of PBKDF2. Similar approaches should exist
for any hasher that has a work factor. If not, this method should be
defined as a no-op to silence the warning.
"""
warnings.warn(
"subclasses of BasePasswordHasher should provide a harden_runtime() method"
)
class PBKDF2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 600000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
self._check_encode_args(password, salt)
iterations = iterations or self.iterations
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode("ascii").strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def decode(self, encoded):
algorithm, iterations, salt, hash = encoded.split("$", 3)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"hash": hash,
"iterations": int(iterations),
"salt": salt,
}
def verify(self, password, encoded):
decoded = self.decode(encoded)
encoded_2 = self.encode(password, decoded["salt"], decoded["iterations"])
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("iterations"): decoded["iterations"],
_("salt"): mask_hash(decoded["salt"]),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
update_salt = must_update_salt(decoded["salt"], self.salt_entropy)
return (decoded["iterations"] != self.iterations) or update_salt
def harden_runtime(self, password, encoded):
decoded = self.decode(encoded)
extra_iterations = self.iterations - decoded["iterations"]
if extra_iterations > 0:
self.encode(password, decoded["salt"], extra_iterations)
class PBKDF2SHA1PasswordHasher(PBKDF2PasswordHasher):
"""
Alternate PBKDF2 hasher which uses SHA1, the default PRF
recommended by PKCS #5. This is compatible with other
implementations of PBKDF2, such as openssl's
PKCS5_PBKDF2_HMAC_SHA1().
"""
algorithm = "pbkdf2_sha1"
digest = hashlib.sha1
class Argon2PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the argon2 algorithm.
This is the winner of the Password Hashing Competition 2013-2015
(https://password-hashing.net). It requires the argon2-cffi library which
depends on native C code and might cause portability issues.
"""
algorithm = "argon2"
library = "argon2"
time_cost = 2
memory_cost = 102400
parallelism = 8
def encode(self, password, salt):
argon2 = self._load_library()
params = self.params()
data = argon2.low_level.hash_secret(
password.encode(),
salt.encode(),
time_cost=params.time_cost,
memory_cost=params.memory_cost,
parallelism=params.parallelism,
hash_len=params.hash_len,
type=params.type,
)
return self.algorithm + data.decode("ascii")
def decode(self, encoded):
argon2 = self._load_library()
algorithm, rest = encoded.split("$", 1)
assert algorithm == self.algorithm
params = argon2.extract_parameters("$" + rest)
variety, *_, b64salt, hash = rest.split("$")
# Add padding.
b64salt += "=" * (-len(b64salt) % 4)
salt = base64.b64decode(b64salt).decode("latin1")
return {
"algorithm": algorithm,
"hash": hash,
"memory_cost": params.memory_cost,
"parallelism": params.parallelism,
"salt": salt,
"time_cost": params.time_cost,
"variety": variety,
"version": params.version,
"params": params,
}
def verify(self, password, encoded):
argon2 = self._load_library()
algorithm, rest = encoded.split("$", 1)
assert algorithm == self.algorithm
try:
return argon2.PasswordHasher().verify("$" + rest, password)
except argon2.exceptions.VerificationError:
return False
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("variety"): decoded["variety"],
_("version"): decoded["version"],
_("memory cost"): decoded["memory_cost"],
_("time cost"): decoded["time_cost"],
_("parallelism"): decoded["parallelism"],
_("salt"): mask_hash(decoded["salt"]),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
current_params = decoded["params"]
new_params = self.params()
# Set salt_len to the salt_len of the current parameters because salt
# is explicitly passed to argon2.
new_params.salt_len = current_params.salt_len
update_salt = must_update_salt(decoded["salt"], self.salt_entropy)
return (current_params != new_params) or update_salt
def harden_runtime(self, password, encoded):
# The runtime for Argon2 is too complicated to implement a sensible
# hardening algorithm.
pass
def params(self):
argon2 = self._load_library()
# salt_len is a noop, because we provide our own salt.
return argon2.Parameters(
type=argon2.low_level.Type.ID,
version=argon2.low_level.ARGON2_VERSION,
salt_len=argon2.DEFAULT_RANDOM_SALT_LENGTH,
hash_len=argon2.DEFAULT_HASH_LENGTH,
time_cost=self.time_cost,
memory_cost=self.memory_cost,
parallelism=self.parallelism,
)
class BCryptSHA256PasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the bcrypt algorithm (recommended)
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
"""
algorithm = "bcrypt_sha256"
digest = hashlib.sha256
library = ("bcrypt", "bcrypt")
rounds = 12
def salt(self):
bcrypt = self._load_library()
return bcrypt.gensalt(self.rounds)
def encode(self, password, salt):
bcrypt = self._load_library()
password = password.encode()
# Hash the password prior to using bcrypt to prevent password
# truncation as described in #20138.
if self.digest is not None:
# Use binascii.hexlify() because a hex encoded bytestring is str.
password = binascii.hexlify(self.digest(password).digest())
data = bcrypt.hashpw(password, salt)
return "%s$%s" % (self.algorithm, data.decode("ascii"))
def decode(self, encoded):
algorithm, empty, algostr, work_factor, data = encoded.split("$", 4)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"algostr": algostr,
"checksum": data[22:],
"salt": data[:22],
"work_factor": int(work_factor),
}
def verify(self, password, encoded):
algorithm, data = encoded.split("$", 1)
assert algorithm == self.algorithm
encoded_2 = self.encode(password, data.encode("ascii"))
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("work factor"): decoded["work_factor"],
_("salt"): mask_hash(decoded["salt"]),
_("checksum"): mask_hash(decoded["checksum"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
return decoded["work_factor"] != self.rounds
def harden_runtime(self, password, encoded):
_, data = encoded.split("$", 1)
salt = data[:29] # Length of the salt in bcrypt.
rounds = data.split("$")[2]
# work factor is logarithmic, adding one doubles the load.
diff = 2 ** (self.rounds - int(rounds)) - 1
while diff > 0:
self.encode(password, salt.encode("ascii"))
diff -= 1
class BCryptPasswordHasher(BCryptSHA256PasswordHasher):
"""
Secure password hashing using the bcrypt algorithm
This is considered by many to be the most secure algorithm but you
must first install the bcrypt library. Please be warned that
this library depends on native C code and might cause portability
issues.
This hasher does not first hash the password which means it is subject to
bcrypt's 72 bytes password truncation. Most use cases should prefer the
BCryptSHA256PasswordHasher.
"""
algorithm = "bcrypt"
digest = None
class ScryptPasswordHasher(BasePasswordHasher):
"""
Secure password hashing using the Scrypt algorithm.
"""
algorithm = "scrypt"
block_size = 8
maxmem = 0
parallelism = 1
work_factor = 2**14
def encode(self, password, salt, n=None, r=None, p=None):
self._check_encode_args(password, salt)
n = n or self.work_factor
r = r or self.block_size
p = p or self.parallelism
hash_ = hashlib.scrypt(
password.encode(),
salt=salt.encode(),
n=n,
r=r,
p=p,
maxmem=self.maxmem,
dklen=64,
)
hash_ = base64.b64encode(hash_).decode("ascii").strip()
return "%s$%d$%s$%d$%d$%s" % (self.algorithm, n, salt, r, p, hash_)
def decode(self, encoded):
algorithm, work_factor, salt, block_size, parallelism, hash_ = encoded.split(
"$", 6
)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"work_factor": int(work_factor),
"salt": salt,
"block_size": int(block_size),
"parallelism": int(parallelism),
"hash": hash_,
}
def verify(self, password, encoded):
decoded = self.decode(encoded)
encoded_2 = self.encode(
password,
decoded["salt"],
decoded["work_factor"],
decoded["block_size"],
decoded["parallelism"],
)
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("work factor"): decoded["work_factor"],
_("block size"): decoded["block_size"],
_("parallelism"): decoded["parallelism"],
_("salt"): mask_hash(decoded["salt"]),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
return (
decoded["work_factor"] != self.work_factor
or decoded["block_size"] != self.block_size
or decoded["parallelism"] != self.parallelism
)
def harden_runtime(self, password, encoded):
# The runtime for Scrypt is too complicated to implement a sensible
# hardening algorithm.
pass
# RemovedInDjango51Warning.
class SHA1PasswordHasher(BasePasswordHasher):
"""
The SHA1 password hashing algorithm (not recommended)
"""
algorithm = "sha1"
def __init__(self, *args, **kwargs):
warnings.warn(
"django.contrib.auth.hashers.SHA1PasswordHasher is deprecated.",
RemovedInDjango51Warning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def encode(self, password, salt):
self._check_encode_args(password, salt)
hash = hashlib.sha1((salt + password).encode()).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def decode(self, encoded):
algorithm, salt, hash = encoded.split("$", 2)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"hash": hash,
"salt": salt,
}
def verify(self, password, encoded):
decoded = self.decode(encoded)
encoded_2 = self.encode(password, decoded["salt"])
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("salt"): mask_hash(decoded["salt"], show=2),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
return must_update_salt(decoded["salt"], self.salt_entropy)
def harden_runtime(self, password, encoded):
pass
class MD5PasswordHasher(BasePasswordHasher):
"""
The Salted MD5 password hashing algorithm (not recommended)
"""
algorithm = "md5"
def encode(self, password, salt):
self._check_encode_args(password, salt)
hash = md5((salt + password).encode()).hexdigest()
return "%s$%s$%s" % (self.algorithm, salt, hash)
def decode(self, encoded):
algorithm, salt, hash = encoded.split("$", 2)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"hash": hash,
"salt": salt,
}
def verify(self, password, encoded):
decoded = self.decode(encoded)
encoded_2 = self.encode(password, decoded["salt"])
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("salt"): mask_hash(decoded["salt"], show=2),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
return must_update_salt(decoded["salt"], self.salt_entropy)
def harden_runtime(self, password, encoded):
pass
# RemovedInDjango51Warning.
class UnsaltedSHA1PasswordHasher(BasePasswordHasher):
"""
Very insecure algorithm that you should *never* use; store SHA1 hashes
with an empty salt.
This class is implemented because Django used to accept such password
hashes. Some older Django installs still have these values lingering
around so we need to handle and upgrade them properly.
"""
algorithm = "unsalted_sha1"
def __init__(self, *args, **kwargs):
warnings.warn(
"django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher is deprecated.",
RemovedInDjango51Warning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def salt(self):
return ""
def encode(self, password, salt):
if salt != "":
raise ValueError("salt must be empty.")
hash = hashlib.sha1(password.encode()).hexdigest()
return "sha1$$%s" % hash
def decode(self, encoded):
assert encoded.startswith("sha1$$")
return {
"algorithm": self.algorithm,
"hash": encoded[6:],
"salt": None,
}
def verify(self, password, encoded):
encoded_2 = self.encode(password, "")
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("hash"): mask_hash(decoded["hash"]),
}
def harden_runtime(self, password, encoded):
pass
# RemovedInDjango51Warning.
class UnsaltedMD5PasswordHasher(BasePasswordHasher):
"""
Incredibly insecure algorithm that you should *never* use; stores unsalted
MD5 hashes without the algorithm prefix, also accepts MD5 hashes with an
empty salt.
This class is implemented because Django used to store passwords this way
and to accept such password hashes. Some older Django installs still have
these values lingering around so we need to handle and upgrade them
properly.
"""
algorithm = "unsalted_md5"
def __init__(self, *args, **kwargs):
warnings.warn(
"django.contrib.auth.hashers.UnsaltedMD5PasswordHasher is deprecated.",
RemovedInDjango51Warning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def salt(self):
return ""
def encode(self, password, salt):
if salt != "":
raise ValueError("salt must be empty.")
return md5(password.encode()).hexdigest()
def decode(self, encoded):
return {
"algorithm": self.algorithm,
"hash": encoded,
"salt": None,
}
def verify(self, password, encoded):
if len(encoded) == 37 and encoded.startswith("md5$$"):
encoded = encoded[5:]
encoded_2 = self.encode(password, "")
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("hash"): mask_hash(decoded["hash"], show=3),
}
def harden_runtime(self, password, encoded):
pass
# RemovedInDjango50Warning.
class CryptPasswordHasher(BasePasswordHasher):
"""
Password hashing using UNIX crypt (not recommended)
The crypt module is not supported on all platforms.
"""
algorithm = "crypt"
library = "crypt"
def __init__(self, *args, **kwargs):
warnings.warn(
"django.contrib.auth.hashers.CryptPasswordHasher is deprecated.",
RemovedInDjango50Warning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
def salt(self):
return get_random_string(2)
def encode(self, password, salt):
crypt = self._load_library()
if len(salt) != 2:
raise ValueError("salt must be of length 2.")
hash = crypt.crypt(password, salt)
if hash is None: # A platform like OpenBSD with a dummy crypt module.
raise TypeError("hash must be provided.")
# we don't need to store the salt, but Django used to do this
return "%s$%s$%s" % (self.algorithm, "", hash)
def decode(self, encoded):
algorithm, salt, hash = encoded.split("$", 2)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"hash": hash,
"salt": salt,
}
def verify(self, password, encoded):
crypt = self._load_library()
decoded = self.decode(encoded)
data = crypt.crypt(password, decoded["hash"])
return constant_time_compare(decoded["hash"], data)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("salt"): decoded["salt"],
_("hash"): mask_hash(decoded["hash"], show=3),
}
def harden_runtime(self, password, encoded):
        pass
/GDAL-3.7.1.1.tar.gz/GDAL-3.7.1.1/gdal-utils/osgeo_utils/samples/loslas2ntv2.py |
import sys
from osgeo import gdal
# dummy object to hold options
class Options(object):
def __init__(self):
self.verbose_flag = 0
self.append = 0
self.create_self = []
self.metadata = []
self.negate = 0
# =============================================================================
def Usage():
print("Usage: loslas2ntv2.py [-a] [-auto] [-sub_name name] [-parent name]")
print(" [-created date] [-updated date] [-gs_type name]")
print(" [-system_f name] [-system_t name] [-version version]")
print(" [-major_f axis] [-minor_f axis]")
print(" [-major_t axis] [-minor_t axis]")
print(" [-negate] [src_file.los]* dst_file.gsb ")
print("")
print(" -a: append to existing NTv2 file.")
print(" -auto: process a whole directory of nad27/hpgn los/las files.")
print(" -negate: reverse direction of change.")
print("")
print("eg.")
print(" loslas2ntv2.py -auto *.los")
print(
" loslas2ntv2.py -system_f NAD27 -system_t NAD83 -sub_name conus conus.los conus.gsb"
)
return 2
# =============================================================================
def TranslateLOSLAS(los, ntv2_filename, options):
# Open the LOS and LAS files.
los_filename = los[:-4] + ".los"
las_filename = los[:-4] + ".las"
los_db = gdal.Open(los_filename)
las_db = gdal.Open(las_filename)
# Create (or append to) the NTv2 file.
create_options = options.create_options
if options.append == 1:
create_options.append("APPEND_SUBDATASET=YES")
ntv2_driver = gdal.GetDriverByName("NTv2")
ntv2_db = ntv2_driver.Create(
ntv2_filename,
los_db.RasterXSize,
los_db.RasterYSize,
4,
gdal.GDT_Float32,
create_options,
)
# Copy georeferencing
ntv2_db.SetGeoTransform(los_db.GetGeoTransform())
# Copy offsets.
data = las_db.ReadAsArray()
if options.negate:
data = -1 * data
ntv2_db.GetRasterBand(1).WriteArray(data)
data = los_db.ReadAsArray()
if options.negate:
data = -1 * data
ntv2_db.GetRasterBand(2).WriteArray(data)
if options.metadata:
ntv2_db.SetMetadata(options.metadata)
# =============================================================================
# Auto-process the normal NOAA director of los/las files, producing a
# NAD27 NTv2 file, and an HPGN NTv2 file.
def auto_noaa(options, loslas_list):
options.append = 0
options.verbose_flag = 0
original_metadata = options.metadata
have_nad27 = 0
# have_hpgn = 0
for los in loslas_list:
options.create_options = []
options.metadata = original_metadata
if los.find("hpgn") != -1:
ntv2_filename = los[:-4] + ".gsb"
options.append = 0
options.negate = 1
options.metadata.append("SUB_NAME=" + los[:2])
options.metadata.append("MAJOR_F=6378137.0")
options.metadata.append("MINOR_F=6356752.31414")
options.metadata.append("MAJOR_T=6378137.0")
options.metadata.append("MINOR_T=6356752.31414")
options.metadata.append("SYSTEM_F=HARN")
options.metadata.append("SYSTEM_T=NAD83")
else:
ntv2_filename = "nad27_usa.gsb"
options.metadata.append("SUB_NAME=" + los[:-4])
if have_nad27 == 0:
options.append = 0
options.metadata.append("MAJOR_F=6378206.4")
options.metadata.append("MINOR_F=6356583.8")
options.metadata.append("MAJOR_T=6378137.0")
options.metadata.append("MINOR_T=6356752.31414")
options.metadata.append("SYSTEM_F=NAD27")
options.metadata.append("SYSTEM_T=NAD83")
else:
options.append = 1
have_nad27 = 1
print("Integrate %s into %s." % (los, ntv2_filename))
TranslateLOSLAS(los, ntv2_filename, options)
return 0
def main(argv=sys.argv):
ntv2_filename = None
loslas_list = []
auto_flag = 0
options = Options()
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return 0
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == "-v":
options.verbose_flag = 1
elif arg == "-version" and i < len(argv) - 1:
options.metadata.append("VERSION=" + argv[i + 1])
i = i + 1
elif arg == "-created" and i < len(argv) - 1:
options.metadata.append("CREATED=" + argv[i + 1])
i = i + 1
elif arg == "-updated" and i < len(argv) - 1:
options.metadata.append("UPDATED=" + argv[i + 1])
i = i + 1
elif arg == "-system_f" and i < len(argv) - 1:
options.metadata.append("SYSTEM_F=" + argv[i + 1])
i = i + 1
elif arg == "-system_t" and i < len(argv) - 1:
options.metadata.append("SYSTEM_T=" + argv[i + 1])
i = i + 1
elif arg == "-parent" and i < len(argv) - 1:
options.metadata.append("PARENT=" + argv[i + 1])
i = i + 1
elif arg == "-sub_name" and i < len(argv) - 1:
options.metadata.append("SUB_NAME=" + argv[i + 1])
i = i + 1
elif arg == "-gs_type" and i < len(argv) - 1:
options.metadata.append("GS_TYPE=" + argv[i + 1])
i = i + 1
elif arg == "-major_f" and i < len(argv) - 1:
options.metadata.append("MAJOR_F=" + argv[i + 1])
i = i + 1
elif arg == "-minor_f" and i < len(argv) - 1:
options.metadata.append("MINOR_F=" + argv[i + 1])
i = i + 1
elif arg == "-major_t" and i < len(argv) - 1:
options.metadata.append("MAJOR_T=" + argv[i + 1])
i = i + 1
elif arg == "-minor_t" and i < len(argv) - 1:
options.metadata.append("MINOR_T=" + argv[i + 1])
i = i + 1
elif arg == "-negate":
options.negate = 1
elif arg == "-auto":
auto_flag = 1
elif arg == "-a":
options.append = 1
elif arg[0] == "-":
return Usage()
elif arg[-4:] == ".los" or arg[-4:] == ".las":
loslas_list.append(arg)
elif arg[-4:] == ".gsb" and ntv2_filename is None:
ntv2_filename = arg
else:
print("Unrecognized argument: ", arg)
return Usage()
i = i + 1
if not loslas_list:
print("No .los/.las files specified as input.")
return Usage()
    if auto_flag == 1:
        return auto_noaa(options, loslas_list)
if ntv2_filename is None:
print("No NTv2 file specified.")
return Usage()
# Process loslas files.
for los in loslas_list:
TranslateLOSLAS(los, ntv2_filename, options)
options.append = 1
return 0
if __name__ == "__main__":
    sys.exit(main(sys.argv))
/Kerapu-2.0.3.tar.gz/Kerapu-2.0.3/kerapu/boom/boom_parameter/__init__.py |
from kerapu.boom.boom_parameter.BehandelKlasse import BehandelKlasse
from kerapu.boom.boom_parameter.BoomParameter import BoomParameter
from kerapu.boom.boom_parameter.DiagnoseCluster import DiagnoseCluster
from kerapu.boom.boom_parameter.DiagnoseCode import DiagnoseCode
from kerapu.boom.boom_parameter.Geslacht import Geslacht
from kerapu.boom.boom_parameter.Leeftijd import Leeftijd
from kerapu.boom.boom_parameter.SpecialismeCluster import SpecialismeCluster
from kerapu.boom.boom_parameter.SpecialismeCode import SpecialismeCode
from kerapu.boom.boom_parameter.ZorgActiviteitCluster import ZorgActiviteitCluster
from kerapu.boom.boom_parameter.ZorgActiviteitCode import ZorgActiviteitCode
from kerapu.boom.boom_parameter.ZorgInstellingCode import ZorgInstellingCode
from kerapu.boom.boom_parameter.ZorgTypeCode import ZorgTypeCode
from kerapu.boom.boom_parameter.ZorgVraagCluster import ZorgVraagCluster
from kerapu.boom.boom_parameter.ZorgVraagCode import ZorgVraagCode
# ----------------------------------------------------------------------------------------------------------------------
_boom_parameters = {}
"""
Pool with all tree parameters
"""
# ----------------------------------------------------------------------------------------------------------------------
def create_boom_parameter(boom_parameter_nummer: int) -> BoomParameter:
"""
    A factory with instance reuse for creating tree parameters. Calling this function repeatedly with the same
    tree-parameter number will always return the same object.
    :param int boom_parameter_nummer: The number of the tree parameter.
:rtype: BoomParameter
"""
if boom_parameter_nummer in _boom_parameters:
return _boom_parameters[boom_parameter_nummer]
_boom_parameters[boom_parameter_nummer] = _create_boom_parameter(boom_parameter_nummer)
return _boom_parameters[boom_parameter_nummer]
# ----------------------------------------------------------------------------------------------------------------------
def _create_boom_parameter(boom_parameter_nummer: int) -> BoomParameter:
"""
    A factory for creating tree parameters.
    :param int boom_parameter_nummer: The number of the tree parameter.
:rtype: BoomParameter
"""
if boom_parameter_nummer == 100:
return Leeftijd()
if boom_parameter_nummer == 101:
return Geslacht()
if boom_parameter_nummer == 110:
return ZorgInstellingCode()
if boom_parameter_nummer == 111:
        # Care institution cluster 1 (zorginstellingscluster 1)
        # This tree parameter is currently not used by the grouper.
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if boom_parameter_nummer == 112:
        # Care institution cluster 2 (zorginstellingscluster 2)
        # This tree parameter is currently not used by the grouper.
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if boom_parameter_nummer == 200:
return SpecialismeCode()
if boom_parameter_nummer == 201:
return SpecialismeCluster(1)
if boom_parameter_nummer == 202:
return SpecialismeCluster(2)
if boom_parameter_nummer == 210:
return ZorgTypeCode()
if boom_parameter_nummer == 211:
        # Care type cluster 1 (zorgtypecluster 1)
        # This tree parameter is currently not used by the grouper.
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if boom_parameter_nummer == 212:
# Zorgtypecluster 2
# Deze boomparameter wordt thans niet gebruikt door de grouper.
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if boom_parameter_nummer == 220:
return ZorgVraagCode()
if boom_parameter_nummer == 221:
return ZorgVraagCluster(1)
if boom_parameter_nummer == 222:
return ZorgVraagCluster(2)
if boom_parameter_nummer == 230:
return DiagnoseCode()
if boom_parameter_nummer == 231:
        # ICD diagnosis code
        # This tree parameter is currently not used by the grouper.
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if 232 <= boom_parameter_nummer <= 237:
return DiagnoseCluster(boom_parameter_nummer - 231)
if boom_parameter_nummer == 241:
        # Start date of the subtrajectory
        # This tree parameter is currently not used by the grouper.
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if boom_parameter_nummer == 300:
return ZorgActiviteitCode(0)
if 301 <= boom_parameter_nummer <= 310:
return ZorgActiviteitCluster(boom_parameter_nummer - 300, 0)
if boom_parameter_nummer == 351:
return BehandelKlasse(0)
if boom_parameter_nummer == 400:
return ZorgActiviteitCode(1)
if 401 <= boom_parameter_nummer <= 410:
return ZorgActiviteitCluster(boom_parameter_nummer - 400, 1)
if boom_parameter_nummer == 451:
        # Treatment class: sum of (count * weight factor 1)
        # This tree parameter is currently not used by the grouper.
# return BehandelKlasse(1)
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
if boom_parameter_nummer == 500:
return ZorgActiviteitCode(2)
if 501 <= boom_parameter_nummer <= 510:
return ZorgActiviteitCluster(boom_parameter_nummer - 500, 2)
if boom_parameter_nummer == 551:
# Behandelklasse – som van (aantal * weegfactor 2)
# Deze boomparameter wordt thans niet gebruikt door de grouper.
# return BehandelKlasse(1)
raise NotImplementedError('Boomparameter %d is niet geïmplementeerd.' % boom_parameter_nummer)
raise RuntimeError("Onbekende boomparameter '%s'." % boom_parameter_nummer)
# ----------------------------------------------------------------------------------------------------------------------
/ConsoleCanvas-1.2.5.tar.gz/ConsoleCanvas-1.2.5/README.md |
# ConsoleCanvas用法:
### 0.安装
```shell
pip install ConsoleCanvas -i https://pypi.python.org/simple
```
### 0.1.终端显示cv2的img:
```python
import consoleCanvas,cv2
img=cv2.imread("C:\\Users\\Administrator\\Desktop\\1.png")
consoleCanvas.cvShow(img)
```
### 1. Initialize the object first:
```python
import consoleCanvas
A=consoleCanvas.consoleCanvas()
```
### 2. Create the canvas:
```python
A.ProduceCanvas(80,80)  # create the canvas
```
### 3. Draw pixels:
```python
A.reviseCanvas([x,y],1)
# [x,y] is the coordinate; 1 draws a black dot, 0 draws a white dot
```
### 4. Show the canvas:
```python
A.show()  # show the canvas
```
### 5. Clear the canvas:
```python
A.ProduceCanvas(80,80)  # i.e. simply recreate the canvas
```
### 6. Example
```python
# draw a circle
import consoleCanvas
a=10
b=10
r=10
A=consoleCanvas.consoleCanvas()  # initialize
A.ProduceCanvas(21,21)  # create the canvas
A.reviseCanvas([0,0],1)
for x in range(a-r,a+r):
y=int((((r**2)-(x-a)**2)**(1/2))+b)
    A.reviseCanvas([x,y],1)  # draw a canvas pixel
for x in range(a+r,a-r,-1):
y=int(-1*(((r**2)-(x-a)**2)**(1/2))+b)
    A.reviseCanvas([x,y],1)  # draw a canvas pixel
A.show()  # show the canvas
```
Output:
```shell
⠁⠀⠔⠀⠀⠁⠀⠂⠄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠠⠃⠀⠀⠀⠀⠀⠀⠈⠆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠇⠀⠀⠀⠀⠀⠀⠀⠀⠸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠇⠀⠀⠀⠀⠀⠀⠀⠀⠨⠂⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠇⠀⠀⠀⠀⠀⠀⠀⠀⠸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠈⠆⠀⠀⠀⠀⠀⠀⠠⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠑⠀⠀⠄⠀⠂⠁⠀
```
/MADAM-0.21.2-py3-none-any.whl/madam/vector.py |
import io
from typing import Any, Callable, IO, Iterable, Mapping, Optional, Tuple
from xml.etree import ElementTree as ET
from madam.core import Asset, Dict, MetadataProcessor, Processor, UnsupportedFormatError, operator
_INCH_TO_MM = 1 / 25.4
_PX_PER_INCH = 90
_PT_PER_INCH = 1 / 72
_FONT_SIZE_PT = 12
_X_HEIGHT = 0.7
def svg_length_to_px(length: Optional[str]) -> float:
if length is None:
raise ValueError()
unit_len = 2
if length.endswith('%'):
unit_len = 1
try:
value = float(length)
unit = 'px'
except ValueError:
value = float(length[:-unit_len])
unit = length[-unit_len:]
if unit == 'em':
return value * _PX_PER_INCH * _FONT_SIZE_PT * _PT_PER_INCH
elif unit == 'ex':
return value * _PX_PER_INCH * _X_HEIGHT * _FONT_SIZE_PT * _PT_PER_INCH
elif unit == 'px':
return value
elif unit == 'in':
return value * _PX_PER_INCH
elif unit == 'cm':
return value * _PX_PER_INCH * _INCH_TO_MM * 10
elif unit == 'mm':
return value * _PX_PER_INCH * _INCH_TO_MM
elif unit == 'pt':
return value * _PX_PER_INCH * _PT_PER_INCH
elif unit == 'pc':
return value * _PX_PER_INCH * _PT_PER_INCH * 12
elif unit == '%':
return value
raise ValueError()
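# Illustrative conversions (not part of MADAM), following the 90 px/inch reference
# resolution and 12 pt font size defined above:
#
#     svg_length_to_px('2in')    # -> 180.0   (2 * 90)
#     svg_length_to_px('10mm')   # -> ~35.43  (10 * 90 / 25.4)
#     svg_length_to_px('12pt')   # -> 15.0    (12 * 90 / 72)
#     svg_length_to_px('50%')    # -> 50.0    (percentages are returned as-is)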
XML_NS = dict(
dc='http://purl.org/dc/elements/1.1/',
rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#',
svg='http://www.w3.org/2000/svg',
xlink='http://www.w3.org/1999/xlink',
)
def _register_xml_namespaces() -> None:
for prefix, uri in XML_NS.items():
if prefix == 'svg':
prefix = ''
ET.register_namespace(prefix, uri)
def _parse_svg(file: IO) -> Tuple[ET.ElementTree, ET.Element]:
_register_xml_namespaces()
try:
tree = ET.parse(file)
except ET.ParseError as e:
raise UnsupportedFormatError(f'Error while parsing XML in line {e.position[0]:d}, column {e.position[1]:d}')
root = tree.getroot()
if root.tag not in ('{%s}svg' % XML_NS['svg'], 'svg'):
raise UnsupportedFormatError('XML file is not an SVG file.')
return tree, root
def _write_svg(tree: ET.ElementTree) -> IO:
file = io.BytesIO()
tree.write(file, xml_declaration=False, encoding='utf-8')
file.seek(0)
return file
class SVGProcessor(Processor):
"""
Represents a processor that handles *Scalable Vector Graphics* (SVG) data.
"""
def __init__(self, config: Optional[Mapping[str, Any]] = None) -> None:
"""
Initializes a new `SVGProcessor`.
:param config: Mapping with settings.
"""
super().__init__(config)
def can_read(self, file: IO) -> bool:
try:
_parse_svg(file)
return True
except UnsupportedFormatError:
return False
def read(self, file: IO) -> Asset:
_, root = _parse_svg(file)
metadata: Dict[str, Any] = dict(mime_type='image/svg+xml')
if 'width' in root.keys():
metadata['width'] = svg_length_to_px(root.get('width'))
if 'height' in root.keys():
metadata['height'] = svg_length_to_px(root.get('height'))
file.seek(0)
return Asset(essence=file, **metadata)
@staticmethod
def __remove_xml_whitespace(elem: ET.Element) -> None:
if elem.text:
elem.text = elem.text.strip()
if elem.tail:
elem.tail = elem.tail.strip()
for child in elem:
SVGProcessor.__remove_xml_whitespace(child)
@staticmethod
def __remove_elements(root: ET.Element, qname: str, keep_func: Callable[[ET.Element], bool]) -> None:
parents = root.findall(f'.//{qname}/..', XML_NS)
for parent in parents:
for elem in parent.findall(f'./{qname}', XML_NS):
if not keep_func(elem):
parent.remove(elem)
@operator
def shrink(self, asset: Asset) -> Asset:
"""
Shrinks the size of an SVG asset.
:param asset: Media asset to be shrunk
:type asset: Asset
:return: Shrunk vector asset
:rtype: Asset
"""
tree, root = _parse_svg(asset.essence)
# Minify XML
SVGProcessor.__remove_xml_whitespace(root)
# Remove empty texts
SVGProcessor.__remove_elements(root, 'svg:text',
lambda e: bool(
e.text and
e.text.strip() or
list(e)
))
# Remove all empty circles with radius 0
SVGProcessor.__remove_elements(root, 'svg:circle',
lambda e: bool(
list(e) or
e.get('r') != '0'
))
# Remove all empty ellipses with x-axis or y-axis radius 0
SVGProcessor.__remove_elements(root, 'svg:ellipse',
lambda e: bool(
list(e) or
e.get('rx') != '0' and
e.get('ry') != '0'
))
# Remove all empty rectangles with width or height 0
SVGProcessor.__remove_elements(root, 'svg:rect',
lambda e: bool(
list(e) or
e.get('width') != '0' and
e.get('height') != '0'
))
# Remove all patterns with width or height 0
SVGProcessor.__remove_elements(root, 'svg:pattern',
lambda e: e.get('width') != '0' and
e.get('height') != '0')
# Remove all images with width or height 0
SVGProcessor.__remove_elements(root, 'svg:image',
lambda e: e.get('width') != '0' and
e.get('height') != '0')
# Remove all paths without coordinates
SVGProcessor.__remove_elements(root, 'svg:path',
lambda e: bool(e.get('d', '').strip()))
# Remove all polygons without points
SVGProcessor.__remove_elements(root, 'svg:polygon',
lambda e: bool(e.get('points', '').strip()))
# Remove all polylines without points
SVGProcessor.__remove_elements(root, 'svg:polyline',
lambda e: bool(e.get('points', '').strip()))
# Remove all invisible or hidden elements
SVGProcessor.__remove_elements(root, '*',
lambda e: e.get('display') != 'none' and
e.get('visibility') != 'hidden' and
e.get('opacity') != '0')
# Remove empty groups
SVGProcessor.__remove_elements(root, 'svg:g',
lambda e: bool(list(e)))
# Remove all invisible or hidden elements
SVGProcessor.__remove_elements(root, 'svg:defs',
lambda e: bool(list(e)))
essence = _write_svg(tree)
metadata = dict(mime_type='image/svg+xml')
for metadata_key in ('width', 'height'):
if metadata_key in asset.metadata:
metadata[metadata_key] = asset.metadata[metadata_key]
return Asset(essence=essence, **metadata)
class SVGMetadataProcessor(MetadataProcessor):
"""
Represents a metadata processor that handles Scalable Vector Graphics (SVG)
data.
It is assumed that the SVG XML uses UTF-8 encoding.
"""
def __init__(self, config: Optional[Mapping[str, Any]] = None) -> None:
"""
Initializes a new `SVGMetadataProcessor`.
:param config: Mapping with settings.
"""
super().__init__(config)
@property
def formats(self) -> Iterable[str]:
return {'rdf'}
def read(self, file: IO) -> Mapping[str, Mapping]:
_, root = _parse_svg(file)
metadata_elem = root.find('./svg:metadata', XML_NS)
if metadata_elem is None or len(metadata_elem) == 0:
return {'rdf': {}}
return {'rdf': {'xml': ET.tostring(metadata_elem[0], encoding='unicode')}}
def strip(self, file: IO) -> IO:
tree, root = _parse_svg(file)
metadata_elem = root.find('./svg:metadata', XML_NS)
if metadata_elem is not None:
root.remove(metadata_elem)
result = _write_svg(tree)
return result
def combine(self, file: IO, metadata: Mapping[str, Mapping]) -> IO:
if not metadata:
raise ValueError('No metadata provided.')
if 'rdf' not in metadata:
raise UnsupportedFormatError('No RDF metadata found.')
rdf = metadata['rdf']
if 'xml' not in rdf:
raise ValueError('XML string missing from RDF metadata.')
tree, root = _parse_svg(file)
metadata_elem = root.find('./svg:metadata', XML_NS)
if metadata_elem is None:
metadata_elem = ET.SubElement(root, '{%(svg)s}metadata' % XML_NS)
metadata_elem.append(ET.fromstring(rdf['xml']))
result = _write_svg(tree)
        return result
/Cuckoo-2.0.7a1.tar.gz/Cuckoo-2.0.7a1/cuckoo/private/db_migration/versions/from_0_6_to_1_1.py |
# Revision identifiers, used by Alembic.
revision = "5aa718cc79e1"
mongo_revision = "1"
down_revision = None
import sqlalchemy as sa
import sys
from alembic import op
from datetime import datetime
from dateutil.parser import parse
from cuckoo.common.mongo import mongo
from cuckoo.core.database import Task
old_enum = (
"pending", "processing", "failure", "success",
)
new_enum = (
"pending", "running", "completed", "recovered", "reported",
# These were not actually supported in 1.0 or 1.1, but we have to migrate
# them somewhere (and they're not handled later on either).
"failed_analysis", "failed_processing",
)
mapping = {
"processing": "running",
"failure": "failed_analysis",
"success": "completed",
}
old_type = sa.Enum(*old_enum, name="status_type")
new_type = sa.Enum(*new_enum, name="status_type")
tmp_type = sa.Enum(*set(old_enum + new_enum), name="status_type_temp")
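# For example (illustrative), a Cuckoo 0.6 task stored as "success" is migrated to
# "completed" and "failure" becomes "failed_analysis"; statuses not listed in the mapping
# (such as "pending") are carried over unchanged.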
def upgrade():
    # BEWARE: be prepared for some really spaghetti code. To deal with SQLite limitations in Alembic we coded some workarounds.
    # Migrations are supported starting from Cuckoo 0.6 and Cuckoo 1.0; we need a way to figure out from which release
    # we are starting because both schemas are missing Alembic release versioning.
    # We check for the machines_tags table to distinguish between Cuckoo 0.6 and 1.0.
conn = op.get_bind()
if conn.engine.dialect.has_table(conn.engine.connect(), "machines_tags"):
# If this table exist we are on Cuckoo 1.0 or above.
# So skip SQL migration.
pass
else:
# We are on Cuckoo < 1.0, hopefully 0.6.
# So run SQL migration.
# Create table used by Tag.
op.create_table(
"tags",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(length=255), nullable=False, unique=True),
)
# Create secondary table used in association Machine - Tag.
op.create_table(
"machines_tags",
sa.Column("machine_id", sa.Integer, sa.ForeignKey("machines.id")),
sa.Column("tag_id", sa.Integer, sa.ForeignKey("tags.id")),
)
# Add columns to Machine.
op.add_column("machines", sa.Column("interface", sa.String(length=255), nullable=True))
op.add_column("machines", sa.Column("snapshot", sa.String(length=255), nullable=True))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_ip", sa.String(length=255), server_default="192.168.56.1", nullable=False))
# TODO: change default value, be aware sqlite doesn't support that kind of ALTER statement.
op.add_column("machines", sa.Column("resultserver_port", sa.String(length=255), server_default="2042", nullable=False))
# Deal with Alembic shit.
# Alembic is so ORMish that it was impossible to write code which works on different DBMS.
if conn.engine.driver == "psycopg2":
# We don"t provide a default value and leave the column as nullable because o further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False), nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("UPDATE tasks SET clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True)
op.execute("ALTER TABLE tasks ALTER COLUMN status DROP DEFAULT")
tmp_type.create(op.get_bind(), checkfirst=True)
op.execute(
"ALTER TABLE tasks ALTER COLUMN status TYPE status_type_temp "
"USING status::text::status_type_temp"
)
old_type.drop(op.get_bind(), checkfirst=False)
for old_status, new_status in mapping.items():
op.execute(
"UPDATE tasks SET status = '%s' WHERE status = '%s'" %
(new_status, old_status)
)
new_type.create(op.get_bind(), checkfirst=False)
op.execute(
"ALTER TABLE tasks ALTER COLUMN status TYPE status_type "
"USING status::text::status_type"
)
tmp_type.drop(op.get_bind(), checkfirst=False)
op.execute(
"ALTER TABLE tasks ALTER COLUMN status "
"SET DEFAULT 'pending'::status_type"
)
elif conn.engine.driver == "mysqldb":
op.alter_column(
"tasks", "status", existing_type=old_type, type_=tmp_type
)
for old_status, new_status in mapping.items():
op.execute(
"UPDATE tasks SET status = '%s' WHERE status = '%s'" %
(new_status, old_status)
)
op.alter_column(
"tasks", "status", existing_type=tmp_type, type_=new_type
)
# We don"t provide a default value and leave the column as nullable because o further data migration.
op.add_column("tasks", sa.Column("clock", sa.DateTime(timezone=False), nullable=True))
# NOTE: We added this new column so we force clock time to the added_on for old analyses.
conn.execute("UPDATE tasks SET clock=added_on")
# Add the not null constraint.
op.alter_column("tasks", "clock", nullable=False, existing_nullable=True, existing_type=sa.DateTime(timezone=False))
elif conn.engine.driver == "pysqlite":
tasks_data = []
old_tasks = conn.execute(
"SELECT id, target, category, timeout, priority, custom, "
"machine, package, options, platform, memory, "
"enforce_timeout, added_on, started_on, completed_on, status, "
"sample_id FROM tasks"
).fetchall()
for item in old_tasks:
d = {}
d["id"] = item[0]
d["target"] = item[1]
d["category"] = item[2]
d["timeout"] = item[3]
d["priority"] = item[4]
d["custom"] = item[5]
d["machine"] = item[6]
d["package"] = item[7]
d["options"] = item[8]
d["platform"] = item[9]
d["memory"] = item[10]
d["enforce_timeout"] = item[11]
if isinstance(item[12], datetime):
d["added_on"] = item[12]
else:
d["added_on"] = parse(item[12]) if item[12] else None
if isinstance(item[13], datetime):
d["started_on"] = item[13]
else:
d["started_on"] = parse(item[13]) if item[13] else None
if isinstance(item[14], datetime):
d["completed_on"] = item[14]
else:
d["completed_on"] = parse(item[14]) if item[14] else None
d["status"] = mapping.get(item[15], item[15])
d["sample_id"] = item[16]
# Force clock.
# NOTE: We added this new column so we force clock time to
# the added_on for old analyses.
d["clock"] = d["added_on"]
tasks_data.append(d)
# Rename original table.
op.rename_table("tasks", "old_tasks")
# Drop old table.
op.drop_table("old_tasks")
# Drop old Enum.
sa.Enum(name="status_type").drop(op.get_bind(), checkfirst=False)
# Create new table with 1.0 schema.
op.create_table(
"tasks",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("target", sa.String(length=255), nullable=False),
sa.Column("category", sa.String(length=255), nullable=False),
sa.Column("timeout", sa.Integer(), server_default="0", nullable=False),
sa.Column("priority", sa.Integer(), server_default="1", nullable=False),
sa.Column("custom", sa.String(length=255), nullable=True),
sa.Column("machine", sa.String(length=255), nullable=True),
sa.Column("package", sa.String(length=255), nullable=True),
sa.Column("options", sa.String(length=255), nullable=True),
sa.Column("platform", sa.String(length=255), nullable=True),
sa.Column("memory", sa.Boolean(), nullable=False, default=False),
sa.Column("enforce_timeout", sa.Boolean(), nullable=False, default=False),
sa.Column("clock", sa.DateTime(timezone=False), server_default=sa.func.now(), nullable=False),
sa.Column("added_on", sa.DateTime(timezone=False), nullable=False),
sa.Column("started_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("completed_on", sa.DateTime(timezone=False), nullable=True),
sa.Column("status", sa.Enum(*new_enum, name="status_type"), server_default="pending", nullable=False),
sa.Column("sample_id", sa.Integer, sa.ForeignKey("samples.id"), nullable=True),
sa.PrimaryKeyConstraint("id")
)
# Insert data.
op.bulk_insert(Task.__table__, tasks_data)
# Migrate mongo.
mongo_upgrade()
def mongo_upgrade():
"""Migrate mongodb schema and data."""
if mongo.init():
print "Starting MongoDB migration."
mongo.connect()
# Check for schema version and create it.
if "cuckoo_schema" in mongo.db.collection_names():
print "Mongo schema version not expected"
sys.exit()
else:
mongo.db.cuckoo_schema.save({"version": mongo_revision})
else:
print "Mongo reporting module not enabled, skipping mongo migration."
def downgrade():
    pass
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/Gyre-Termes/Monospace/Regular/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.GyreTermesMathJax_Monospace={directory:"Monospace/Regular",family:"GyreTermesMathJax_Monospace",testString:"\u00A0\uD835\uDE70\uD835\uDE71\uD835\uDE72\uD835\uDE73\uD835\uDE74\uD835\uDE75\uD835\uDE76\uD835\uDE77\uD835\uDE78\uD835\uDE79\uD835\uDE7A\uD835\uDE7B\uD835\uDE7C\uD835\uDE7D",32:[0,0,600,0,0],160:[0,0,600,0,0],120432:[563,0,600,9,591],120433:[563,0,600,43,541],120434:[576,16,600,63,534],120435:[563,0,600,43,520],120436:[563,0,600,43,520],120437:[563,0,600,43,520],120438:[576,16,600,63,562],120439:[563,0,600,53,551],120440:[563,0,600,113,487],120441:[563,16,600,84,583],120442:[563,0,600,43,572],120443:[563,0,600,63,541],120444:[563,0,600,11,593],120445:[563,0,600,22,562],120446:[576,16,600,51,549],120447:[563,0,600,43,499],120448:[576,115,600,51,549],120449:[563,0,600,43,589],120450:[576,16,600,92,508],120451:[563,0,600,72,528],120452:[563,16,600,40,560],120453:[563,0,600,9,591],120454:[563,0,600,20,580],120455:[563,0,600,40,560],120456:[563,0,600,51,549],120457:[563,0,600,103,497],120458:[431,16,600,72,541],120459:[604,16,600,22,541],120460:[431,16,600,84,535],120461:[604,16,600,63,583],120462:[431,16,600,63,520],120463:[604,0,600,105,541],120464:[431,186,600,63,562],120465:[604,0,600,43,551],120466:[610,0,600,92,508],120467:[610,186,600,147,458],120468:[604,0,600,63,541],120469:[604,0,600,92,508],120470:[431,0,600,11,593],120471:[431,0,600,53,541],120472:[431,16,600,72,528],120473:[431,186,600,22,541],120474:[431,186,600,63,583],120475:[427,0,600,84,541],120476:[431,16,600,103,497],120477:[563,16,600,43,499],120478:[417,16,600,43,541],120479:[417,0,600,30,570],120480:[417,0,600,30,570],120481:[417,0,600,51,549],120482:[417,186,600,51,549],120483:[417,0,600,115,489],120822:[618,15,600,113,487],120823:[612,0,600,113,487],120824:[618,0,600,84,478],120825:[618,15,600,96,499],120826:[604,0,600,105,478],120827:[604,15,600,96,499],120828:[618,15,600,136,510],120829:[604,1,600,105,478],120830:[618,15,600,113,487],120831:[618,15,600,136,510]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"GyreTermesMathJax_Monospace"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Monospace/Regular/Main.js"]); | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/codemirror/mode/julia/julia.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("julia", function(config, parserConf) {
function wordRegexp(words, end, pre) {
if (typeof pre === "undefined") { pre = ""; }
if (typeof end === "undefined") { end = "\\b"; }
return new RegExp("^" + pre + "((" + words.join(")|(") + "))" + end);
}
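  // For example, wordRegexp(["if", "else"]) builds /^((if)|(else))\b/ with the default
  // pre ("") and end ("\\b") arguments; explicit pre/end values are spliced in the same way.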
var octChar = "\\\\[0-7]{1,3}";
var hexChar = "\\\\x[A-Fa-f0-9]{1,2}";
var sChar = "\\\\[abefnrtv0%?'\"\\\\]";
var uChar = "([^\\u0027\\u005C\\uD800-\\uDFFF]|[\\uD800-\\uDFFF][\\uDC00-\\uDFFF])";
var asciiOperatorsList = [
"[<>]:", "[<>=]=", "<<=?", ">>>?=?", "=>", "--?>", "<--[->]?", "\\/\\/",
"\\.{2,3}", "[\\.\\\\%*+\\-<>!\\/^|&]=?", "\\?", "\\$", "~", ":"
];
var operators = parserConf.operators || wordRegexp([
"[<>]:", "[<>=]=", "<<=?", ">>>?=?", "=>", "--?>", "<--[->]?", "\\/\\/",
"[\\\\%*+\\-<>!\\/^|&\\u00F7\\u22BB]=?", "\\?", "\\$", "~", ":",
"\\u00D7", "\\u2208", "\\u2209", "\\u220B", "\\u220C", "\\u2218",
"\\u221A", "\\u221B", "\\u2229", "\\u222A", "\\u2260", "\\u2264",
"\\u2265", "\\u2286", "\\u2288", "\\u228A", "\\u22C5",
"\\b(in|isa)\\b(?!\.?\\()"
], "");
var delimiters = parserConf.delimiters || /^[;,()[\]{}]/;
var identifiers = parserConf.identifiers ||
/^[_A-Za-z\u00A1-\u2217\u2219-\uFFFF][\w\u00A1-\u2217\u2219-\uFFFF]*!*/;
var chars = wordRegexp([octChar, hexChar, sChar, uChar], "'");
var openersList = ["begin", "function", "type", "struct", "immutable", "let",
"macro", "for", "while", "quote", "if", "else", "elseif", "try",
"finally", "catch", "do"];
var closersList = ["end", "else", "elseif", "catch", "finally"];
var keywordsList = ["if", "else", "elseif", "while", "for", "begin", "let",
"end", "do", "try", "catch", "finally", "return", "break", "continue",
"global", "local", "const", "export", "import", "importall", "using",
"function", "where", "macro", "module", "baremodule", "struct", "type",
"mutable", "immutable", "quote", "typealias", "abstract", "primitive",
"bitstype"];
var builtinsList = ["true", "false", "nothing", "NaN", "Inf"];
CodeMirror.registerHelper("hintWords", "julia", keywordsList.concat(builtinsList));
var openers = wordRegexp(openersList);
var closers = wordRegexp(closersList);
var keywords = wordRegexp(keywordsList);
var builtins = wordRegexp(builtinsList);
var macro = /^@[_A-Za-z\u00A1-\uFFFF][\w\u00A1-\uFFFF]*!*/;
var symbol = /^:[_A-Za-z\u00A1-\uFFFF][\w\u00A1-\uFFFF]*!*/;
var stringPrefixes = /^(`|([_A-Za-z\u00A1-\uFFFF]*"("")?))/;
var macroOperators = wordRegexp(asciiOperatorsList, "", "@");
var symbolOperators = wordRegexp(asciiOperatorsList, "", ":");
function inArray(state) {
return (state.nestedArrays > 0);
}
function inGenerator(state) {
return (state.nestedGenerators > 0);
}
function currentScope(state, n) {
if (typeof(n) === "undefined") { n = 0; }
if (state.scopes.length <= n) {
return null;
}
return state.scopes[state.scopes.length - (n + 1)];
}
// tokenizers
function tokenBase(stream, state) {
// Handle multiline comments
if (stream.match('#=', false)) {
state.tokenize = tokenComment;
return state.tokenize(stream, state);
}
// Handle scope changes
var leavingExpr = state.leavingExpr;
if (stream.sol()) {
leavingExpr = false;
}
state.leavingExpr = false;
if (leavingExpr) {
if (stream.match(/^'+/)) {
return "operator";
}
}
if (stream.match(/\.{4,}/)) {
return "error";
} else if (stream.match(/\.{1,3}/)) {
return "operator";
}
if (stream.eatSpace()) {
return null;
}
var ch = stream.peek();
// Handle single line comments
if (ch === '#') {
stream.skipToEnd();
return "comment";
}
if (ch === '[') {
state.scopes.push('[');
state.nestedArrays++;
}
if (ch === '(') {
state.scopes.push('(');
state.nestedGenerators++;
}
if (inArray(state) && ch === ']') {
while (state.scopes.length && currentScope(state) !== "[") { state.scopes.pop(); }
state.scopes.pop();
state.nestedArrays--;
state.leavingExpr = true;
}
if (inGenerator(state) && ch === ')') {
while (state.scopes.length && currentScope(state) !== "(") { state.scopes.pop(); }
state.scopes.pop();
state.nestedGenerators--;
state.leavingExpr = true;
}
if (inArray(state)) {
if (state.lastToken == "end" && stream.match(':')) {
return "operator";
}
if (stream.match('end')) {
return "number";
}
}
var match;
if (match = stream.match(openers, false)) {
state.scopes.push(match[0]);
}
if (stream.match(closers, false)) {
state.scopes.pop();
}
// Handle type annotations
if (stream.match(/^::(?![:\$])/)) {
state.tokenize = tokenAnnotation;
return state.tokenize(stream, state);
}
// Handle symbols
if (!leavingExpr && (stream.match(symbol) || stream.match(symbolOperators))) {
return "builtin";
}
// Handle parametric types
//if (stream.match(/^{[^}]*}(?=\()/)) {
// return "builtin";
//}
// Handle operators and Delimiters
if (stream.match(operators)) {
return "operator";
}
// Handle Number Literals
if (stream.match(/^\.?\d/, false)) {
var imMatcher = RegExp(/^im\b/);
var numberLiteral = false;
if (stream.match(/^0x\.[0-9a-f_]+p[\+\-]?[_\d]+/i)) { numberLiteral = true; }
// Integers
if (stream.match(/^0x[0-9a-f_]+/i)) { numberLiteral = true; } // Hex
if (stream.match(/^0b[01_]+/i)) { numberLiteral = true; } // Binary
if (stream.match(/^0o[0-7_]+/i)) { numberLiteral = true; } // Octal
// Floats
if (stream.match(/^(?:(?:\d[_\d]*)?\.(?!\.)(?:\d[_\d]*)?|\d[_\d]*\.(?!\.)(?:\d[_\d]*))?([Eef][\+\-]?[_\d]+)?/i)) { numberLiteral = true; }
if (stream.match(/^\d[_\d]*(e[\+\-]?\d+)?/i)) { numberLiteral = true; } // Decimal
if (numberLiteral) {
// Integer literals may be "long"
stream.match(imMatcher);
state.leavingExpr = true;
return "number";
}
}
// Handle Chars
if (stream.match('\'')) {
state.tokenize = tokenChar;
return state.tokenize(stream, state);
}
// Handle Strings
if (stream.match(stringPrefixes)) {
state.tokenize = tokenStringFactory(stream.current());
return state.tokenize(stream, state);
}
if (stream.match(macro) || stream.match(macroOperators)) {
return "meta";
}
if (stream.match(delimiters)) {
return null;
}
if (stream.match(keywords)) {
return "keyword";
}
if (stream.match(builtins)) {
return "builtin";
}
var isDefinition = state.isDefinition || state.lastToken == "function" ||
state.lastToken == "macro" || state.lastToken == "type" ||
state.lastToken == "struct" || state.lastToken == "immutable";
if (stream.match(identifiers)) {
if (isDefinition) {
if (stream.peek() === '.') {
state.isDefinition = true;
return "variable";
}
state.isDefinition = false;
return "def";
}
state.leavingExpr = true;
return "variable";
}
// Handle non-detected items
stream.next();
return "error";
}
function tokenAnnotation(stream, state) {
stream.match(/.*?(?=[,;{}()=\s]|$)/);
if (stream.match('{')) {
state.nestedParameters++;
} else if (stream.match('}') && state.nestedParameters > 0) {
state.nestedParameters--;
}
if (state.nestedParameters > 0) {
stream.match(/.*?(?={|})/) || stream.next();
} else if (state.nestedParameters == 0) {
state.tokenize = tokenBase;
}
return "builtin";
}
function tokenComment(stream, state) {
if (stream.match('#=')) {
state.nestedComments++;
}
if (!stream.match(/.*?(?=(#=|=#))/)) {
stream.skipToEnd();
}
if (stream.match('=#')) {
state.nestedComments--;
if (state.nestedComments == 0)
state.tokenize = tokenBase;
}
return "comment";
}
function tokenChar(stream, state) {
var isChar = false, match;
if (stream.match(chars)) {
isChar = true;
} else if (match = stream.match(/\\u([a-f0-9]{1,4})(?=')/i)) {
var value = parseInt(match[1], 16);
if (value <= 55295 || value >= 57344) { // (U+0,U+D7FF), (U+E000,U+FFFF)
isChar = true;
stream.next();
}
} else if (match = stream.match(/\\U([A-Fa-f0-9]{5,8})(?=')/)) {
var value = parseInt(match[1], 16);
if (value <= 1114111) { // U+10FFFF
isChar = true;
stream.next();
}
}
if (isChar) {
state.leavingExpr = true;
state.tokenize = tokenBase;
return "string";
}
if (!stream.match(/^[^']+(?=')/)) { stream.skipToEnd(); }
if (stream.match('\'')) { state.tokenize = tokenBase; }
return "error";
}
function tokenStringFactory(delimiter) {
if (delimiter.substr(-3) === '"""') {
delimiter = '"""';
} else if (delimiter.substr(-1) === '"') {
delimiter = '"';
}
function tokenString(stream, state) {
if (stream.eat('\\')) {
stream.next();
} else if (stream.match(delimiter)) {
state.tokenize = tokenBase;
state.leavingExpr = true;
return "string";
} else {
stream.eat(/[`"]/);
}
stream.eatWhile(/[^\\`"]/);
return "string";
}
return tokenString;
}
var external = {
startState: function() {
return {
tokenize: tokenBase,
scopes: [],
lastToken: null,
leavingExpr: false,
isDefinition: false,
nestedArrays: 0,
nestedComments: 0,
nestedGenerators: 0,
nestedParameters: 0,
firstParenPos: -1
};
},
token: function(stream, state) {
var style = state.tokenize(stream, state);
var current = stream.current();
if (current && style) {
state.lastToken = current;
}
return style;
},
indent: function(state, textAfter) {
var delta = 0;
if ( textAfter === ']' || textAfter === ')' || /^end\b/.test(textAfter) ||
/^else/.test(textAfter) || /^catch\b/.test(textAfter) || /^elseif\b/.test(textAfter) ||
/^finally/.test(textAfter) ) {
delta = -1;
}
return (state.scopes.length + delta) * config.indentUnit;
},
electricInput: /\b(end|else|catch|finally)\b/,
blockCommentStart: "#=",
blockCommentEnd: "=#",
lineComment: "#",
closeBrackets: "()[]{}\"\"",
fold: "indent"
};
return external;
});
CodeMirror.defineMIME("text/x-julia", "julia");
});
/DI_engine-0.4.9-py3-none-any.whl/ding/hpc_rl/wrapper.py | import importlib
from ditk import logging
from collections import OrderedDict
from functools import wraps
import ding
'''
Overview:
`hpc_wrapper` is the wrapper for functions which are supported by hpc. If a function is wrapped by it, we will
search for its hpc type and return the function implemented by hpc.
We will use the following code as a sample to introduce `hpc_wrapper`:
```
@hpc_wrapper(shape_fn=shape_fn_dntd, namedtuple_data=True, include_args=[0,1,2,3],
include_kwargs=['data', 'gamma', 'v_min', 'v_max'], is_cls_method=False)
def dist_nstep_td_error(
data: namedtuple,
gamma: float,
v_min: float,
v_max: float,
n_atom: int,
nstep: int = 1,
) -> torch.Tensor:
...
```
Parameters:
    - shape_fn (:obj:`function`): a function which returns the shape needed by the hpc function. In fact, it returns
        all args that the hpc function needs.
    - namedtuple_data (:obj:`bool`): If True, the namedtuple is unpacked when the hpc function is called, i.e. it is
        called as hpc_function(*namedtuple). If False, the namedtuple data keeps its `namedtuple` type.
    - include_args (:obj:`list`): a list of indices of the positional args that need to be passed to the hpc function.
        As shown in the sample, include_args=[0,1,2,3] means `data`, `gamma`, `v_min` and `v_max` are passed to the
        hpc function.
    - include_kwargs (:obj:`list`): a list of keys of the kwargs that need to be passed to the hpc function. As shown
        in the sample, include_kwargs=['data', 'gamma', 'v_min', 'v_max'] means `data`, `gamma`, `v_min` and `v_max`
        are passed to the hpc function.
    - is_cls_method (:obj:`bool`): If True, the wrapped function is a method of a class, so `self` arrives as the
        first positional argument; we strip `self` from args and use the class name as fn_name.
        If False, the wrapped function is a plain function.
Q&A:
    - Q: Do `include_args` and `include_kwargs` need to be set at the same time?
    - A: Yes. Together, `include_args` and `include_kwargs` handle every calling convention, such as
        (data, gamma, v_min=v_min, v_max=v_max) and (data, gamma, v_min, v_max).
- Q: What is `hpc_fns`?
- A: Here we show a normal `hpc_fns`:
```
hpc_fns = {
'fn_name1': {
'runtime_name1': hpc_fn1,
'runtime_name2': hpc_fn2,
...
},
...
}
```
        Besides, `per_fn_limit` is the max length of `hpc_fns[fn_name]`. When a new function comes in, the oldest
        function is popped from `hpc_fns[fn_name]`.
'''
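# A hedged sketch (not part of this module) of what a `shape_fn` can look like: it receives the
# wrapped call's (args, kwargs) and must return the constructor arguments of the matching hpc class.
# The field names and shapes below are assumptions for illustration only.
#
#   def shape_fn_dntd(args, kwargs):
#       data = kwargs.get('data', args[0])    # the input namedtuple
#       T, B = data.reward.shape              # assumed (nstep, batch) layout
#       return [T, B, kwargs.get('n_atom', args[4])]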
hpc_fns = {}
per_fn_limit = 3
def register_runtime_fn(fn_name, runtime_name, shape):
fn_name_mapping = {
'gae': ['hpc_rll.rl_utils.gae', 'GAE'],
'dist_nstep_td_error': ['hpc_rll.rl_utils.td', 'DistNStepTD'],
'LSTM': ['hpc_rll.torch_utils.network.rnn', 'LSTM'],
'ppo_error': ['hpc_rll.rl_utils.ppo', 'PPO'],
'q_nstep_td_error': ['hpc_rll.rl_utils.td', 'QNStepTD'],
'q_nstep_td_error_with_rescale': ['hpc_rll.rl_utils.td', 'QNStepTDRescale'],
'ScatterConnection': ['hpc_rll.torch_utils.network.scatter_connection', 'ScatterConnection'],
'td_lambda_error': ['hpc_rll.rl_utils.td', 'TDLambda'],
'upgo_loss': ['hpc_rll.rl_utils.upgo', 'UPGO'],
'vtrace_error_discrete_action': ['hpc_rll.rl_utils.vtrace', 'VTrace'],
}
fn_str = fn_name_mapping[fn_name]
cls = getattr(importlib.import_module(fn_str[0]), fn_str[1])
hpc_fn = cls(*shape).cuda()
if fn_name not in hpc_fns:
hpc_fns[fn_name] = OrderedDict()
hpc_fns[fn_name][runtime_name] = hpc_fn
while len(hpc_fns[fn_name]) > per_fn_limit:
hpc_fns[fn_name].popitem(last=False)
# print(hpc_fns)
return hpc_fn
def hpc_wrapper(shape_fn=None, namedtuple_data=False, include_args=[], include_kwargs=[], is_cls_method=False):
def decorate(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ding.enable_hpc_rl:
shape = shape_fn(args, kwargs)
if is_cls_method:
fn_name = args[0].__class__.__name__
else:
fn_name = fn.__name__
runtime_name = '_'.join([fn_name] + [str(s) for s in shape])
if fn_name not in hpc_fns or runtime_name not in hpc_fns[fn_name]:
hpc_fn = register_runtime_fn(fn_name, runtime_name, shape)
else:
hpc_fn = hpc_fns[fn_name][runtime_name]
if is_cls_method:
args = args[1:]
clean_args = []
for i in include_args:
if i < len(args):
clean_args.append(args[i])
nouse_args = list(set(list(range(len(args)))).difference(set(include_args)))
clean_kwargs = {}
for k, v in kwargs.items():
if k in include_kwargs:
if k == 'lambda_':
k = 'lambda'
clean_kwargs[k] = v
nouse_kwargs = list(set(kwargs.keys()).difference(set(include_kwargs)))
if len(nouse_args) > 0 or len(nouse_kwargs) > 0:
logging.warn(
'in {}, index {} of args are dropped, and keys {} of kwargs are dropped.'.format(
runtime_name, nouse_args, nouse_kwargs
)
)
if namedtuple_data:
data = args[0] # args[0] is a namedtuple
return hpc_fn(*data, *clean_args[1:], **clean_kwargs)
else:
return hpc_fn(*clean_args, **clean_kwargs)
else:
return fn(*args, **kwargs)
return wrapper
    return decorate
/HISpectralModel-0.1.0.tar.gz/HISpectralModel-0.1.0/hispectrum/hiutils/local_exceptions.py |
# Name: local_exceptions
#
# Author: Ian Stewart
#
# TODO:
# - Find all instances of raising EmptyMethod rather than EmptyMethod() and fix them.
#
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# Copyright (C) 2014 Ian M Stewart
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For the GNU General Public License, see <http://www.gnu.org/licenses/>.
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# History (version, date, change author):
#
# 2014-05-14 IMS/AIfA
#.......................................................................
# * ims_exceptions.py copied to this release version, renamed, and all but relevant code removed.
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""This contains several commonly-used exceptions."""
_module_name = 'local_exceptions'
#.......................................................................
class ExceededMaxNumIter(Exception):
def __init__(self, maxNumIter):
self.maxNumIter = maxNumIter
def __str__(self):
return 'Maximum permitted number of iterations %d exceeded.' % (self.maxNumIter)
class EmptyMethod(Exception):
def __str__(self):
return 'This method should be implemented in a subclass.'
class FailedTest(Exception):
def __init__(self, test):
self.test = test
def __str__(self):
return 'Failed test %s' % (self.test)
class GracefulStop(Exception):
def __init__(self, gracefulStopFile):
self.gracefulStopFile = gracefulStopFile
def __str__(self):
return 'Graceful stop file %s is present.' % (self.gracefulStopFile)
class NonmatchingShapes(Exception):
def __init__(self, shape1, name1, shape2, name2):
self.shape1 = shape1
self.shape2 = shape2
self.name1 = name1
self.name2 = name2
def __str__(self):
return "Shape %s for array %s doesn't match shape %s for array %s." % (str(self.shape1), self.name1, str(self.shape2), self.name2)
class NotYetImplemented(Exception): pass
# def __str__(self):
# return "This choice is not yet implemented."
class ObsoleteModule(Exception):
def __init__(self, name):
self.moduleName = name
def __str__(self):
return 'This module %s is obsolete. Please use a later one.' % (self.moduleName)
class OutOfRange(Exception):
def __init__(self, rangeObject, arg):
self.rangeObject = rangeObject
self.arg = arg
def __str__(self):
return 'Argument %f was outside the range %s.' % (self.arg, self.rangeObject)
class Report(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class Bug(Report):
def __init__(self, message):
Report.__init__(self, 'Bug! '+message)
class ShellCommandFailed(Report):
def __init__(self, message):
Report.__init__(self, message)
class TestStop(Exception):
def __str__(self):
return 'Stopping here for test purposes.'
class UnrecognizedChoiceObject(Exception):
def __init__(self, choiceObject, message=None):
self.choiceObject = choiceObject
self.message = message
def __str__(self):
    if self.message is None:
return 'Choice %s was not recognized.' % (str(self.choiceObject))
else:
return '%s: choice %s was not recognized.' % (self.message, str(self.choiceObject))
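# Hedged usage sketch (the choice values below are assumptions, not part of this module):
#   if mode not in ('fast', 'accurate'):
#     raise UnrecognizedChoiceObject(mode, message='run_model')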
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
if __name__ == '__main__':
  raise NotYetImplemented()
/BooleanNet-1.2.8.tar.gz/BooleanNet-1.2.8/boolean2/ply/lex.py |
__version__ = "2.3"
import re, sys, types
# Regular expression used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Available instance types. This is used when lexers are defined by a class.
# It's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.
try:
_INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
_INSTANCETYPE = types.InstanceType
class object: pass # Note: needed if no new-style classes present
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self,message,s):
self.args = (message,)
self.text = s
# Token class
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
def __repr__(self):
return str(self)
def skip(self,n):
self.lexer.skip(n)
# -----------------------------------------------------------------------------
# Lexer class
#
# This class encapsulates all of the methods and data associated with a lexer.
#
# input() - Store a new string in the lexer
# token() - Get the next token
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexdebug = 0 # Debugging mode
self.lexoptimize = 0 # Optimized mode
def clone(self,object=None):
c = Lexer()
c.lexstatere = self.lexstatere
c.lexstateinfo = self.lexstateinfo
c.lexstateretext = self.lexstateretext
c.lexstate = self.lexstate
c.lexstatestack = self.lexstatestack
c.lexstateignore = self.lexstateignore
c.lexstateerrorf = self.lexstateerrorf
c.lexreflags = self.lexreflags
c.lexdata = self.lexdata
c.lexpos = self.lexpos
c.lexlen = self.lexlen
c.lextokens = self.lextokens
c.lexdebug = self.lexdebug
c.lineno = self.lineno
c.lexoptimize = self.lexoptimize
c.lexliterals = self.lexliterals
c.lexmodule = self.lexmodule
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = { }
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object,f[0].__name__),f[1]))
newre.append((cre,newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = { }
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object,ef.__name__)
c.lexmodule = object
# Set up other attributes
c.begin(c.lexstate)
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self,tabfile):
tf = open(tabfile+".py","w")
tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = { }
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = { }
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self,tabfile,fdict):
exec "import %s as lextab" % tabfile
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = { }
self.lexstateretext = { }
for key,lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = { }
for key,ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self,s):
if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
raise ValueError, "Expected a string"
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self,state):
if not self.lexstatere.has_key(state):
raise ValueError, "Undefined state"
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state,"")
self.lexerrorf = self.lexstateerrorf.get(state,None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self,state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self,n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
# This code provides some short-circuit code for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre,lexindexfunc in self.lexre:
m = lexre.match(lexdata,lexpos)
if not m: continue
# Set last match in lexer so that rules can access it if they want
self.lexmatch = m
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
lexpos = m.end()
i = m.lastindex
func,tok.type = lexindexfunc[i]
self.lexpos = lexpos
if not func:
# If no token type was set, it's an ignored token
if tok.type: return tok
break
# if func not callable, it means it's an ignored token
if not callable(func):
break
# If token is processed by a function, call it
newtok = func(tok)
# Every function must return a token, if nothing, we just move to next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
if not self.lextokens.has_key(newtok.type):
raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
func.func_code.co_filename, func.func_code.co_firstlineno,
func.__name__, newtok.type),lexdata[lexpos:])
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.lexer = self
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
lexpos = self.lexpos
if not newtok: continue
return newtok
self.lexpos = lexpos
raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError, "No input string given with input()"
return None
# -----------------------------------------------------------------------------
# _validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the filename.
# -----------------------------------------------------------------------------
def _validate_file(filename):
import os.path
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return 1 # Oh well
fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
counthash = { }
linen = 1
noerror = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
noerror = 0
linen += 1
return noerror
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist):
result = []
for f in funclist:
if f and f[0]:
result.append((f[0].__name__,f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
if not relist: return []
regex = "|".join(relist)
try:
lexre = re.compile(regex,re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
for f,i in lexre.groupindex.items():
handle = ldict.get(f,None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle,toknames[handle.__name__])
elif handle is not None:
# If rule was specified as a string, we build an anonymous
# callback function to carry out the action
if f.find("ignore_") > 0:
lexindexfunc[i] = (None,None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre,lexindexfunc)],[regex]
except Exception,e:
m = int(len(relist)/2)
if m == 0: m = 1
llist, lre = _form_master_re(relist[:m],reflags,ldict,toknames)
rlist, rre = _form_master_re(relist[m:],reflags,ldict,toknames)
return llist+rlist, lre+rre
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not names.has_key(parts[i]) and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names.keys())
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
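# A minimal (hypothetical) token module as consumed by lex(); the rules follow the t_<NAME>
# convention used throughout this file:
#
#   tokens = ('NUMBER', 'PLUS')
#   t_PLUS = r'\+'
#   t_ignore = ' \t'
#   def t_NUMBER(t):
#       r'\d+'
#       t.value = int(t.value)
#       return t
#   def t_error(t):
#       t.lexer.skip(1)
#
#   lexer = lex()            # picks up the caller's globals when no module is given
#   lexer.input("1+2")
#   while lexer.token(): pass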
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
global lexer
ldict = None
stateinfo = { 'INITIAL' : 'inclusive'}
error = 0
files = { }
lexobj = Lexer()
lexobj.lexdebug = debug
lexobj.lexoptimize = optimize
global token,input
if nowarn: warn = 0
else: warn = 1
if object: module = object
if module:
# User supplied a module object.
if isinstance(module, types.ModuleType):
ldict = module.__dict__
elif isinstance(module, _INSTANCETYPE):
_items = [(k,getattr(module,k)) for k in dir(module)]
ldict = { }
for (i,v) in _items:
ldict[i] = v
else:
raise ValueError,"Expected a module or instance"
lexobj.lexmodule = module
else:
# No module given. We might be able to get information from the caller.
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
f = f.f_back # Walk out to our calling function
ldict = f.f_globals # Grab its globals dictionary
if optimize and lextab:
try:
lexobj.readtab(lextab,ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Get the tokens, states, and literals variables (if any)
if (module and isinstance(module,_INSTANCETYPE)):
tokens = getattr(module,"tokens",None)
states = getattr(module,"states",None)
literals = getattr(module,"literals","")
else:
tokens = ldict.get("tokens",None)
states = ldict.get("states",None)
literals = ldict.get("literals","")
if not tokens:
raise SyntaxError,"lex: module does not define 'tokens'"
if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
raise SyntaxError,"lex: tokens must be a list or tuple."
# Build a dictionary of valid token names
lexobj.lextokens = { }
if not optimize:
for n in tokens:
if not _is_identifier.match(n):
print >>sys.stderr, "lex: Bad token name '%s'" % n
error = 1
if warn and lexobj.lextokens.has_key(n):
print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
lexobj.lextokens[n] = None
else:
for n in tokens: lexobj.lextokens[n] = None
if debug:
print "lex: tokens = '%s'" % lexobj.lextokens.keys()
try:
for c in literals:
if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
error = 1
continue
except TypeError:
print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
error = 1
lexobj.lexliterals = literals
# Build statemap
if states:
if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
print >>sys.stderr, "lex: states must be defined as a tuple or list."
error = 1
else:
for s in states:
if not isinstance(s,types.TupleType) or len(s) != 2:
print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
error = 1
continue
name, statetype = s
if not isinstance(name,types.StringType):
print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
error = 1
continue
if not (statetype == 'inclusive' or statetype == 'exclusive'):
print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
error = 1
continue
if stateinfo.has_key(name):
print >>sys.stderr, "lex: state '%s' already defined." % name
error = 1
continue
stateinfo[name] = statetype
# Get a list of symbols with the t_ or s_ prefix
tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
# Now build up a list of functions and a list of strings
funcsym = { } # Symbols defined as functions
strsym = { } # Symbols defined as strings
toknames = { } # Mapping of symbols to token names
for s in stateinfo.keys():
funcsym[s] = []
strsym[s] = []
ignore = { } # Ignore strings by state
errorf = { } # Error functions by state
if len(tsymbols) == 0:
raise SyntaxError,"lex: no rules of the form t_rulename are defined."
for f in tsymbols:
t = ldict[f]
states, tokname = _statetoken(f,stateinfo)
toknames[f] = tokname
if callable(t):
for s in states: funcsym[s].append((f,t))
elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
for s in states: strsym[s].append((f,t))
else:
print >>sys.stderr, "lex: %s not defined as a function or string" % f
error = 1
# Sort the functions by line number
for f in funcsym.values():
f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
# Sort the strings by regular expression length
for s in strsym.values():
s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
regexs = { }
# Build the master regular expressions
for state in stateinfo.keys():
regex_list = []
# Add rules defined by functions first
for fname, f in funcsym[state]:
line = f.func_code.co_firstlineno
file = f.func_code.co_filename
files[file] = None
tokname = toknames[fname]
ismethod = isinstance(f, types.MethodType)
if not optimize:
nargs = f.func_code.co_argcount
if ismethod:
reqargs = 2
else:
reqargs = 1
if nargs > reqargs:
print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
error = 1
continue
if nargs < reqargs:
print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
error = 1
continue
if tokname == 'ignore':
print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
error = 1
continue
if tokname == 'error':
errorf[state] = f
continue
if f.__doc__:
if not optimize:
try:
c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
if c.match(""):
print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
error = 1
continue
except re.error,e:
print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
if '#' in f.__doc__:
print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
error = 1
continue
if debug:
print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
# Okay. The regular expression seemed okay. Let's append it to the master regular
# expression we're building
regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
else:
print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
# Now add all of the simple rules
for name,r in strsym[state]:
tokname = toknames[name]
if tokname == 'ignore':
if "\\" in r:
print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
ignore[state] = r
continue
if not optimize:
if tokname == 'error':
raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
error = 1
continue
if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
if (c.match("")):
print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
error = 1
continue
except re.error,e:
print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
if '#' in r:
print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
error = 1
continue
if debug:
print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
regex_list.append("(?P<%s>%s)" % (name,r))
if not regex_list:
print >>sys.stderr, "lex: No rules defined for state '%s'" % state
error = 1
regexs[state] = regex_list
if not optimize:
for f in files.keys():
if not _validate_file(f):
error = 1
if error:
raise SyntaxError,"lex: Unable to build lexer."
# From this point forward, we're reasonably confident that we can build the lexer.
# No more errors will be generated, but there might be some warning messages.
# Build the master regular expressions
for state in regexs.keys():
lexre, re_text = _form_master_re(regexs[state],reflags,ldict,toknames)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
if debug:
for i in range(len(re_text)):
print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
# For inclusive states, we need to add the INITIAL state
for state,type in stateinfo.items():
if state != "INITIAL" and type == 'inclusive':
lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
# Set up error functions
lexobj.lexstateerrorf = errorf
lexobj.lexerrorf = errorf.get("INITIAL",None)
if warn and not lexobj.lexerrorf:
print >>sys.stderr, "lex: Warning. no t_error rule is defined."
# Check state information for ignore and error rules
for s,stype in stateinfo.items():
if stype == 'exclusive':
if warn and not errorf.has_key(s):
print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
if warn and not ignore.has_key(s) and lexobj.lexignore:
print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
elif stype == 'inclusive':
if not errorf.has_key(s):
errorf[s] = errorf.get("INITIAL",None)
if not ignore.has_key(s):
ignore[s] = ignore.get("INITIAL","")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab)
return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
print "Reading from standard input (type EOF to end):"
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while 1:
tok = _token()
if not tok: break
print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
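# Hypothetical usage: keep the regex in a variable and attach it with the decorator instead of
# relying on the function docstring:
#
#   identifier = r'[A-Za-z_][A-Za-z0-9_]*'
#
#   @TOKEN(identifier)
#   def t_ID(t):
#       return t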
def TOKEN(r):
def set_doc(f):
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
/NeuroTS-3.4.0-py3-none-any.whl/neurots/generate/algorithms/basicgrower.py |
# Copyright (C) 2021 Blue Brain Project, EPFL
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from neurots.generate.algorithms.abstractgrower import AbstractAlgo
from neurots.generate.algorithms.common import bif_methods
from neurots.generate.algorithms.common import section_data
logger = logging.getLogger(__name__)
class TrunkAlgo(AbstractAlgo):
"""TreeGrower basic growth of trunks class.
Args:
input_data (dict): All the data required for the growth.
params (dict): The parameters required for growth.
start_point (list[float]): The first point of the trunk.
context (Any): An object containing contextual information.
"""
def __init__(self, input_data, params, start_point, context=None, **_):
"""Constructor of the TrunkAlgo class."""
super().__init__(input_data, params, start_point, context)
self.bif_method = bif_methods[params["branching_method"]]
def initialize(self):
"""Generates the data to be used for the initialization of the first section to be grown.
Saves the extracted input data into the corresponding structures.
"""
stop = {"num_seg": self.params["num_seg"]}
num_sec = 1 # A single section per tree will be generated
return stop, num_sec
def bifurcate(self, current_section):
"""When the section bifurcates two new sections are created.
This method computes from the current state the data required for the
generation of two new sections and returns the corresponding dictionaries.
Args:
current_section (neurots.generate.section.SectionGrowerPath): The current section.
Returns:
tuple[dict, dict]: Two dictionaries containing the two children sections data.
"""
dir1, dir2 = self.bif_method()
first_point = np.array(current_section.last_point)
stop = current_section.stop_criteria
return (
section_data(dir1, first_point, stop, current_section.process),
section_data(dir2, first_point, stop, current_section.process),
)
def terminate(self, current_section):
"""Terminate the current section.
When the growth of a section is terminated the "term" must be removed from the TMD grower.
"""
def extend(self, current_section):
"""Extend the current section.
Create a section with the selected parameters until at least one stop criterion is
fulfilled.
"""
return current_section.next()
class AxonAlgo(TrunkAlgo):
"""TreeGrower of axon growth.
    Only a trunk with one segment is synthesized and another process is expected to graft an actual
axon on this trunk.
"""
def __init__(self, *args, **kwargs):
# Force num_seg in params to 1
params = kwargs.get("params", None) or args[1]
params["num_seg"] = 1
        super().__init__(*args, **kwargs)
/Hummingbird-XFEL-1.3b0.tar.gz/Hummingbird-XFEL-1.3b0/hummingbird/analysis/hitfinding.py | from __future__ import (absolute_import, # Compatibility with python 2 and 3
print_function)
import collections
import numpy as np
from hummingbird import ipc
from hummingbird.backend import add_record
hitrate_counters = {}
hit_counters = {}
def countHits(evt, hit, outkey="nrHits"):
"""Counts hits and adds the total nr. of hits to ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:hit: A boolean (True for hit, False for miss)
Kwargs:
:outkey(str): Data key of resulting :func:`~backend.Record` object, default is "nrHits"
:Authors:
Benedikt J. Daurer ([email protected]),
Jonas Sellberg,
Tomas Ekeberg
"""
global hit_counters
if outkey not in hit_counters:
hit_counters[outkey] = 0
if hit:
hit_counters[outkey] += 1
v = evt["analysis"]
add_record(v, "analysis", outkey, hit_counters[outkey])
def hitrate(evt, hit, history=100, unit='percent', outkey="hitrate"):
"""Counts hits and adds current hit rate to ``evt["analysis"][outkey]``.
Args:
:evt: The event variable
:hit: A boolean (True for hit, False for miss)
Kwargs:
:history(int): Buffer length, default = 100
:outkey(str): Data key of resulting :func:`~backend.Record` object, default is "hitrate"
:unit(str): Unit of hitrate, 'fraction' or 'percent', default is 'percent'
:Authors:
Benedikt J. Daurer ([email protected]),
Tomas Ekeberg
"""
hit = np.atleast_1d(hit)
global hitrate_counters
if outkey not in hitrate_counters or hitrate_counters[outkey].maxlen != history:
hitrate_counters[outkey] = collections.deque([], history)
for h in hit:
hitrate_counters[outkey].append(bool(h))
hitcount = np.array(hitrate_counters[outkey].count(True))
ipc.mpi.sum("hitcount - " + outkey, hitcount)
v = evt["analysis"]
if (ipc.mpi.is_main_event_reader()):
hitrate = hitcount[()] / (ipc.mpi.nr_event_readers() * float(len(hitrate_counters[outkey])))
if unit == 'fraction':
add_record(v, "analysis", outkey, hitrate)
elif unit == 'percent':
add_record(v, "analysis", outkey, 100.*hitrate)
def countLitPixels(evt, record, aduThreshold=20, hitscoreThreshold=200, hitscoreDark=0, hitscoreMax=None, mask=None, stack=False, outkey="litpixel: "):
"""A simple hitfinder that counts the number of lit pixels and
adds the result to ``evt["analysis"][outkey + "isHit"]``, ``evt["analysis"][outkey + "isMiss"]``,
and the hitscore to ``evt["analysis"][outkey + "hitscore"]``.
Args:
:evt: The event variable
:record: A pixel detector :func:`~backend.Record` object
Kwargs:
:aduThreshold(int): only pixels above this threshold (in ADUs) are valid, default=20
:hitscoreThreshold(int): events with hitscore (Nr. of lit pixels) above this threshold are hits, default=200
:hitscoreMax(int): events with hitscore (Nr. of lit pixels) below this threshold (if not None) are hits, default=None
:hitscoreDark(int): events with hitscore (Nr. of lit pixels) above this threshold are not darks (so either hit or miss), default=0
:mask(int, bool): only use masked pixel (mask == True or 1) for counting the nr. of lit pixels
:outkey(str): Prefix of data key of resulting :func:`~backend.Record` object, default is "litpixel: "
:Authors:
Benedikt J. Daurer ([email protected])
"""
if(mask is None):
mask = 1
hitscore = ((record.data*mask) > aduThreshold).sum(axis=(0,1) if stack is True else None)
hit = np.array(hitscore > hitscoreThreshold, dtype='int')
if hitscoreMax is not None:
hit *= np.array(hitscore <= hitscoreMax, dtype='int')
miss = np.array((~hit) & (hitscore > hitscoreDark), dtype='int')
v = evt["analysis"]
add_record(v, "analysis", outkey + "isHit", hit)
add_record(v, "analysis", outkey + "isMiss", miss)
add_record(v, "analysis", outkey + "hitscore", hitscore)
def countTof(evt, record, signalThreshold=1, minWindow=0, maxWindow=-1, hitscoreThreshold=2, outkey="tof: "):
"""A simple hitfinder that performs a peak counting test on a time-of-flight detector signal,
in a specific subwindow and adds the result to ``evt["analysis"][outkey + "isHit"]``,
and the hitscore to ``evt["analysis"][outkey + "hitscore"]``.
Args:
:evt: The event variable
:record: A ToF detector :func:`~backend.Record` object
Kwargs:
        :signalThreshold(float): The threshold of the signal, anything above this contributes to the score, default=1
        :minWindow(int): Lower limit of the window, default=0
        :maxWindow(int): Upper limit of the window, default=-1
:hitscoreThreshold(int): events with hitscore (Nr. of photons) above this threshold are hits, default=2
:outkey(str): Prefix of data key of resulting :func:`~backend.Record` object, default is "tof: "
:Authors:
Carl Nettelblad ([email protected])
"""
    hitscore = (record.data[minWindow:maxWindow] > signalThreshold).sum()  # count samples above the signal threshold
hit = hitscore > hitscoreThreshold
v = evt["analysis"]
add_record(v, "analysis", outkey + "isHit", hit)
add_record(v, "analysis", outkey + "hitscore", hitscore)
def countHitscore(evt, hitscore, hitscoreThreshold=200, outkey="predef: "):
"""A simple hitfinder that performs a limit test against an already defined hitscore
and adds the result to ``evt["analysis"][outkey + "isHit"]``, and
the hitscore to ``evt["analysis"][outkey + "hitscore"]``.
Args:
:evt: The event variable
:hitscore: A pre-defined hitscore
Kwargs:
:hitscoreThreshold(int): Events with hitscore above this threshold are hits, default=200
:outkey(str): Prefix of data key of resulting :func:`~backend.Record` object, default is "predef: "
:Authors:
Carl Nettelblad ([email protected]),
Benedikt J. Daurer
"""
hit = hitscore > hitscoreThreshold
v = evt["analysis"]
add_record(v, "analysis", outkey + "isHit", hit)
add_record(v, "analysis", outkey + "hitscore", hitscore)
def countPhotonsAgainstEnergyFunction(evt, photonscore_record, energy_record, energyFunction = lambda x : 200, outkey="photons: "):
"""A hitfinder that tests given photon score (e.g. photon count) against a predicted photon threshold
that is dependent on some given energy and
adds a boolean to ``evt["analysis"][outkey + "isHit"]``, the hitscore to ``evt["analysis"][outkey + "hitscore"]`` and
the limit to ``evt["analysis"][outkey + "photonLimit"]``.
Args:
:evt: The event variable
:photonscore_record: A :func:`~backend.Record` object containing a photon score, e.g. total photon count
:energy_record:" A :func:`~backend.Record` object containing an energy value, e.g. from gas monitor detector
Kwargs:
:energyFunction(function with double argument): Function that computes the photon threshold, given the energy
:outkey(str): Prefix of data key of resulting :func:`~backend.Record` object, default is "photons: "
:Authors:
Carl Nettelblad ([email protected])
"""
score = photonscore_record.data
energy = energy_record.data
photonLimit = energyFunction(energy)
v = evt["analysis"]
hit = score > photonLimit
add_record(v, "analysis", outkey + "isHit", hit)
add_record(v, "analysis", outkey + "photonLimit", photonLimit)
add_record(v, "analysis", outkey + "hitscore", score)
def countPhotonsAgainstEnergyPolynomial(evt, photonscore_record, energy_record, energyPolynomial = [200], outkey="photons: "):
"""A hitfinder that tests photon score (e.g. photon count) against a predicted photon threshold
that is dependent on some given energy and
adds a boolean to ``evt["analysis"][outkey + "isHit"]``, the hitscore to ``evt["analysis"][outkey + "hitscore"]`` and
the limit to ``evt["analysis"][outkey + "photonLimit"]``.
Args:
:evt: The event variable
:photonscore_record: A :func:`~backend.Record` object containing a photon score, e.g. total photon count
:energy_record: A :func:`~backend.Record` object containing an energy value, e.g. from gas monitor detector
Kwargs:
:energyPolynomial: array_like with polynomial coefficients fed to polyval (polynomial order one less than list length)
:outkey(str): Prefix of data key of resulting :func:`~backend.Record` object, default is "photons: "
:Authors:
Carl Nettelblad ([email protected])
"""
countPhotonsAgainstEnergyFunction(evt, photonscore_record, energy_record, lambda x : np.polyval(energyPolynomial, x), outkey=outkey)
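# Converts an ADU frame into an approximate per-pixel photon count by dividing by the
# single-photon ADU threshold, rounding, and clipping negative values to zero.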
def photon_count_frame(evt,front_type_s,front_key_s,aduThreshold,outkey=""):
photon_frame = (evt[front_type_s][front_key_s].data/aduThreshold).round()
photon_frame[photon_frame<=0] = 0
v = evt["analysis"]
add_record(v, "analysis", outkey+"photon_count", photon_frame)
def lambda_values(evt,pulse_energy,sum_over_bkg_frames,fit_bkg,sample_params,outkey=""):
frame_expected_phc = np.dot(sample_params,np.array([pulse_energy**3,pulse_energy**2,pulse_energy,1]))
lambdav = sum_over_bkg_frames*frame_expected_phc/fit_bkg.sum()
lambdav[lambdav<=0] = 1e-30
v = evt["analysis"]
add_record(v, "analysis", outkey+"lambda_values", lambdav)
add_record(v, "analysis", outkey+"expected_phc", frame_expected_phc)
def baglivo_score(evt,poisson_mask,outkey=""):
#poisson_mask = poisson_mask.astype(bool)
N = evt["analysis"]["expected_phc"].data
observed_phc = evt["analysis"]["photon_count"].data[poisson_mask]
lambda_values = evt["analysis"]["lambda_values"].data[poisson_mask]
normalized_lambdas = lambda_values/lambda_values.sum()
partial_sum = observed_phc*(np.log(observed_phc) - np.log(normalized_lambdas) - np.log(N))
partial_sum[observed_phc==0] = 0
logval = partial_sum.sum()
v = evt["analysis"]
add_record(v, "analysis", outkey+"baglivo_score", logval)
def stat_hitfinder(evt,pulse_energy,thr_params,bag_bkg,outkey="bagscore: "):
thr = thr_params[0]*pulse_energy + thr_params[1] + 2*bag_bkg.std()
hit = evt["analysis"]["baglivo_score"].data > thr
v = evt["analysis"]
add_record(v, "analysis", outkey+"isHit", hit)
add_record(v, "analysis", outkey+"threshold", thr)
def generate_radial_mask(mask,cx,cy,radius):
[dimy,dimx] = mask.shape
x = np.arange(dimx)-cx
y = np.arange(dimy)-cy
X,Y = np.meshgrid(x,y)
R = np.sqrt(X**2+Y**2)
mask2 = mask.copy()
mask2[R > radius] = 0
return mask2 | PypiClean |
/GenIce-1.0.11.tar.gz/GenIce-1.0.11/genice/lattices/Struct63.py | pairs="""
179 160
1 168
66 30
159 66
17 104
170 70
69 64
63 101
97 21
35 186
13 137
205 192
3 92
151 59
91 113
123 192
130 111
100 106
92 205
173 101
13 160
33 57
111 82
141 110
194 175
118 68
86 4
190 120
47 198
88 31
126 199
11 29
56 29
168 109
167 168
150 151
137 39
60 26
167 93
10 192
51 88
141 80
59 205
164 3
28 99
25 106
111 178
8 81
105 129
189 102
183 132
152 102
8 157
127 81
79 84
112 127
60 129
37 81
93 128
191 73
116 194
107 92
142 187
178 113
83 185
66 59
145 95
164 26
43 82
2 137
37 166
149 56
76 89
5 130
31 93
138 50
195 7
167 6
207 196
157 140
2 107
58 89
15 143
30 126
119 20
73 94
30 166
172 193
142 204
23 82
150 202
74 175
28 101
43 19
161 34
35 155
147 12
125 41
80 50
49 102
36 116
163 17
102 34
112 59
37 152
171 175
177 191
18 120
71 144
98 61
172 148
91 115
13 166
134 176
51 132
156 93
122 138
165 176
58 5
48 157
19 158
36 40
20 175
141 11
86 7
28 130
150 83
72 122
121 201
45 78
21 94
36 74
5 204
0 77
135 43
112 179
130 68
23 173
203 147
12 32
129 144
140 126
67 186
124 18
162 94
204 101
146 206
131 99
137 136
10 9
87 40
140 162
0 46
131 168
76 7
196 181
57 91
133 29
36 3
35 120
133 104
161 138
138 160
55 154
47 44
100 199
24 160
190 109
142 104
17 195
85 186
53 46
95 206
53 177
90 38
112 166
48 16
79 198
171 17
14 203
19 198
110 26
103 185
71 20
13 34
197 156
18 85
159 103
64 125
46 11
170 105
91 121
207 136
33 165
55 106
115 129
170 4
127 39
183 197
148 133
62 139
53 90
50 38
117 27
111 158
173 108
149 184
67 97
191 38
24 136
98 176
174 45
163 89
54 109
113 12
131 65
52 143
61 115
110 171
78 188
97 199
204 117
2 169
169 140
132 154
8 52
57 62
88 6
135 15
44 125
95 176
174 191
38 9
146 154
58 104
194 56
174 161
114 76
49 128
123 77
74 144
73 199
155 97
24 80
62 201
48 143
145 203
198 147
71 61
80 149
39 78
154 189
58 86
156 188
162 192
77 9
180 122
55 64
37 42
193 151
57 98
151 153
61 32
183 125
25 189
202 172
67 84
180 200
33 147
46 122
12 158
72 45
165 51
201 195
187 4
190 52
75 6
150 159
127 169
202 4
146 128
116 185
180 39
193 181
8 155
81 188
22 121
87 70
173 76
110 184
34 78
65 19
65 15
149 148
18 43
5 170
124 99
159 107
62 163
197 45
1 118
84 44
70 144
152 156
49 52
28 1
1 108
14 64
206 51
96 139
181 133
73 10
134 203
100 162
0 56
42 155
124 32
44 21
86 148
136 200
135 75
66 10
41 106
178 27
182 108
0 50
177 100
95 182
2 30
167 15
82 63
23 75
22 60
201 114
42 48
119 98
63 27
54 206
202 207
114 182
169 205
103 164
178 70
99 158
92 153
116 77
60 185
35 16
24 172
197 189
22 184
153 40
49 188
180 179
96 88
183 31
128 109
27 115
42 126
26 74
179 193
14 47
157 21
142 7
164 90
123 3
118 6
145 96
85 79
25 161
196 187
22 194
165 79
139 182
40 83
131 120
87 187
200 11
65 16
174 41
196 153
119 163
71 113
103 9
54 75
33 96
141 90
134 85
47 16
190 135
14 186
152 143
53 123
118 139
171 29
83 105
124 63
87 117
68 89
23 68
105 117
134 32
55 67
25 72
184 195
200 181
84 132
119 114
94 41
107 207
72 177
146 69
145 69
69 31
54 108
121 20
"""
waters="""
0.3125 0.1875 0.5106
0.84556 0.85799 0.07197
0.32303 0.67165 0.6064
0.38603 0.43685 0.48829
0.375 0.75 0.34511
0.625 0.75 0.24476
0.61397 0.93685 0.01171
0.13915 0.92165 0.28041
0.98109 0.65786 0.79493
0.937 0.35981 0.5486
0.91859 0.43867 0.60885
0.76103 0.06185 0.48829
0.33141 0.43867 0.10885
0.32038 0.87449 0.64323
0.34556 0.35799 0.92803
0.42647 0.74935 0.92513
0.41859 0.56133 0.89115
0.76415 0.04665 0.29203
0.88603 0.56316 0.01171
0.23897 0.56185 0.01171
0.375 0.25 0.25525
0.01415 0.45335 0.79203
0.05147 0.25066 0.33451
0.32303 0.82835 0.1064
0.23897 0.93816 0.51171
0.43388 0.125 0.72734
0.67698 0.32835 0.3936
0.98585 0.54665 0.20797
0.75 0.75 0.12052
0.61712 0.06301 0.41346
0.53256 0.62682 0.64323
0.88288 0.06301 0.91346
0.58141 0.43867 0.10885
0.92647 0.24935 0.07488
0.23585 0.95335 0.70797
0.66859 0.56133 0.89115
0.27994 0.43568 0.41346
0.625 0.75 0.74476
0.92647 0.25066 0.57488
0.96744 0.87318 0.64323
0.08141 0.56133 0.39115
0.125 0.25 0.75525
0.56613 0.625 0.77267
0.125 0.625 0.02343
0.03256 0.37318 0.85677
0.86085 0.07835 0.71959
0.56301 0.14019 0.5486
0.22006 0.43568 0.91346
0.35609 0.62383 0.82284
0.14391 0.87617 0.82284
0.16595 0.1415 0.5486
0.625 0.125 0.97658
0.125 0.75 0.84511
0.57353 0.24935 0.57488
0.26103 0.93816 0.01171
0.44853 0.25066 0.83451
0.34556 0.14202 0.42803
0.875 0.25 0.1549
0.56613 0.875 0.27267
0.84556 0.64202 0.57197
0.82038 0.37449 0.35677
0.64391 0.37617 0.17716
0.85609 0.12383 0.17716
0.96744 0.62682 0.14323
0.25 0.25 0.87949
0.437 0.64019 0.95141
0.72006 0.56432 0.58654
0.67962 0.37449 0.85677
0.53256 0.87318 0.14323
0.15444 0.14202 0.92803
0.36085 0.57835 0.28041
0.43388 0.375 0.22734
0.64391 0.12383 0.67716
0.85609 0.37617 0.67716
0.46744 0.37318 0.35677
0.375 0.875 0.02343
0.26415 0.95335 0.20797
0.1875 0.3125 0.5106
0.98585 0.95335 0.70797
0.875 0.375 0.97658
0.11397 0.06316 0.48829
0.875 0.75 0.74476
0.17698 0.67165 0.1064
0.83141 0.56133 0.39115
0.82303 0.32835 0.8936
0.76103 0.43816 0.98829
0.35609 0.87617 0.32284
0.14391 0.62383 0.32284
0.73897 0.06185 0.98829
0.51415 0.95335 0.20797
0.75 0.25 0.52914
0.01891 0.34214 0.20507
0.26103 0.56185 0.51171
0.83141 0.93867 0.89115
0.06613 0.375 0.72734
0.33406 0.1415 0.0486
0.937 0.14019 0.0486
0.76415 0.45335 0.79203
0.625 0.25 0.1549
0.65444 0.64202 0.07197
0.48109 0.34214 0.70507
0.94853 0.75066 0.1655
0.36085 0.92165 0.78041
0.73897 0.43816 0.48829
0.76415 0.92165 0.31469
0.73585 0.57835 0.31469
0.375 0.25 0.75525
0.375 0.625 0.52343
0.11712 0.93699 0.08654
0.06301 0.85981 0.95141
0.82303 0.17165 0.3936
0.32038 0.62551 0.14323
0.75 0.75 0.62052
0.23585 0.42165 0.18531
0.26415 0.07835 0.18531
0.86085 0.42165 0.21959
0.15444 0.35799 0.42803
0.93388 0.625 0.27267
0.72006 0.93568 0.08654
0.48109 0.15786 0.20507
0.83406 0.6415 0.95141
0.125 0.25 0.25525
0.58141 0.06133 0.60885
0.33406 0.3585 0.5486
0.77994 0.56432 0.08654
0.05147 0.24935 0.83451
0.51415 0.54665 0.70797
0.94853 0.74935 0.6655
0.08141 0.93867 0.89115
0.73585 0.45335 0.29203
0.55147 0.74935 0.1655
0.6875 0.6875 0.9894
0.67698 0.17165 0.8936
0.66859 0.93867 0.39115
0.56301 0.35981 0.0486
0.25 0.75 0.97087
0.125 0.875 0.52343
0.17698 0.82835 0.6064
0.33141 0.06133 0.60885
0.91859 0.06133 0.10885
0.26415 0.54665 0.70797
0.875 0.125 0.47658
0.98109 0.84214 0.29493
0.375 0.75 0.84511
0.48585 0.45335 0.29203
0.1875 0.1875 0.0106
0.27994 0.06432 0.91346
0.16595 0.3585 0.0486
0.41859 0.93867 0.39115
0.22006 0.06432 0.41346
0.66595 0.6415 0.45141
0.8125 0.6875 0.4894
0.51891 0.84214 0.79493
0.06301 0.64019 0.45141
0.46744 0.12682 0.85677
0.76415 0.57835 0.81469
0.73585 0.92165 0.81469
0.13915 0.57835 0.78041
0.38288 0.56301 0.08654
0.61397 0.56316 0.51171
0.38288 0.93699 0.58654
0.23585 0.07835 0.68531
0.26415 0.42165 0.68531
0.63915 0.07835 0.21959
0.625 0.375 0.47658
0.75 0.25 0.02914
0.55147 0.75066 0.6655
0.66595 0.8585 0.95141
0.8125 0.8125 0.9894
0.17962 0.62551 0.64323
0.51891 0.65786 0.29493
0.67962 0.12551 0.35677
0.437 0.85981 0.45141
0.17962 0.87449 0.14323
0.01891 0.15786 0.70507
0.44853 0.24935 0.33451
0.57353 0.25066 0.07488
0.625 0.25 0.6549
0.23585 0.54665 0.20797
0.65444 0.85799 0.57197
0.77994 0.93568 0.58654
0.83406 0.8585 0.45141
0.16859 0.06133 0.10885
0.82038 0.12551 0.85677
0.03256 0.12682 0.35677
0.88288 0.43699 0.41346
0.61712 0.43699 0.91346
0.125 0.75 0.34511
0.93388 0.875 0.77267
0.48585 0.04665 0.79203
0.07353 0.75066 0.92513
0.875 0.25 0.6549
0.16859 0.43867 0.60885
0.6875 0.8125 0.4894
0.25 0.25 0.37949
0.01415 0.04665 0.29203
0.07353 0.74935 0.42513
0.73585 0.04665 0.79203
0.11397 0.43685 0.98829
0.63915 0.42165 0.71959
0.88603 0.93685 0.51171
0.06613 0.125 0.22734
0.42647 0.75066 0.42513
0.3125 0.3125 0.0106
0.875 0.75 0.24476
0.11712 0.56301 0.58654
0.38603 0.06316 0.98829
0.25 0.75 0.47087
"""
coord= "relative"
cages="""
12 0.54412 0.24738 0.45315
12 0.95588 0.25262 0.45315
12 0.45588 0.74738 0.04685
12 0.45588 0.75262 0.54685
12 0.04412 0.75262 0.04685
12 0.04412 0.74738 0.54685
12 0.54412 0.25262 0.95315
12 0.95588 0.24738 0.95315
14 0.25 0.25 0.13611
14 0.75 0.75 0.36389
14 0.75 0.75 0.86389
14 0.25 0.25 0.63611
14 0.38225 0.06806 0.30582
14 0.11775 0.43194 0.30582
14 0.61775 0.56806 0.19418
14 0.61775 0.93194 0.69418
14 0.88225 0.93194 0.19418
14 0.88225 0.56806 0.69418
14 0.38225 0.43194 0.80582
14 0.11775 0.06806 0.80582
15 0.5421 0.06339 0.10512
15 0.9579 0.43661 0.10512
15 0.4579 0.56339 0.39488
15 0.4579 0.93661 0.89488
15 0.0421 0.93661 0.39488
15 0.0421 0.56339 0.89488
15 0.5421 0.43661 0.60512
15 0.9579 0.06339 0.60512
12 0.0 0.0 0.0
12 0.5 0.5 0.0
12 0.0 0.5 0.5
12 0.5 0.0 0.5
14 0.25 0.75 0.22677
14 0.75 0.25 0.27323
14 0.75 0.25 0.77323
14 0.25 0.75 0.72677
"""
bondlen = 3
cell = """
16.097405686270985 30.870809118877812 57.365438878590446
"""
density = 0.21809256820530465
from genice.cell import cellvectors
cell = cellvectors(a=16.097405686270985,
b=30.870809118877812,
c=57.365438878590446) | PypiClean |
/Flask-Swag-0.1.2.tar.gz/Flask-Swag-0.1.2/flask_swag/resources/swagger-ui/lib/backbone-min.js |
(function(t,e){if(typeof define==="function"&&define.amd){define(["underscore","jquery","exports"],function(i,r,s){t.Backbone=e(t,s,i,r)})}else if(typeof exports!=="undefined"){var i=require("underscore");e(t,exports,i)}else{t.Backbone=e(t,{},t._,t.jQuery||t.Zepto||t.ender||t.$)}})(this,function(t,e,i,r){var s=t.Backbone;var n=[];var a=n.push;var o=n.slice;var h=n.splice;e.VERSION="1.1.2";e.$=r;e.noConflict=function(){t.Backbone=s;return this};e.emulateHTTP=false;e.emulateJSON=false;var u=e.Events={on:function(t,e,i){if(!c(this,"on",t,[e,i])||!e)return this;this._events||(this._events={});var r=this._events[t]||(this._events[t]=[]);r.push({callback:e,context:i,ctx:i||this});return this},once:function(t,e,r){if(!c(this,"once",t,[e,r])||!e)return this;var s=this;var n=i.once(function(){s.off(t,n);e.apply(this,arguments)});n._callback=e;return this.on(t,n,r)},off:function(t,e,r){var s,n,a,o,h,u,l,f;if(!this._events||!c(this,"off",t,[e,r]))return this;if(!t&&!e&&!r){this._events=void 0;return this}o=t?[t]:i.keys(this._events);for(h=0,u=o.length;h<u;h++){t=o[h];if(a=this._events[t]){this._events[t]=s=[];if(e||r){for(l=0,f=a.length;l<f;l++){n=a[l];if(e&&e!==n.callback&&e!==n.callback._callback||r&&r!==n.context){s.push(n)}}}if(!s.length)delete this._events[t]}}return this},trigger:function(t){if(!this._events)return this;var e=o.call(arguments,1);if(!c(this,"trigger",t,e))return this;var i=this._events[t];var r=this._events.all;if(i)f(i,e);if(r)f(r,arguments);return this},stopListening:function(t,e,r){var s=this._listeningTo;if(!s)return this;var n=!e&&!r;if(!r&&typeof e==="object")r=this;if(t)(s={})[t._listenId]=t;for(var a in s){t=s[a];t.off(e,r,this);if(n||i.isEmpty(t._events))delete this._listeningTo[a]}return this}};var l=/\s+/;var c=function(t,e,i,r){if(!i)return true;if(typeof i==="object"){for(var s in i){t[e].apply(t,[s,i[s]].concat(r))}return false}if(l.test(i)){var n=i.split(l);for(var a=0,o=n.length;a<o;a++){t[e].apply(t,[n[a]].concat(r))}return false}return true};var f=function(t,e){var i,r=-1,s=t.length,n=e[0],a=e[1],o=e[2];switch(e.length){case 0:while(++r<s)(i=t[r]).callback.call(i.ctx);return;case 1:while(++r<s)(i=t[r]).callback.call(i.ctx,n);return;case 2:while(++r<s)(i=t[r]).callback.call(i.ctx,n,a);return;case 3:while(++r<s)(i=t[r]).callback.call(i.ctx,n,a,o);return;default:while(++r<s)(i=t[r]).callback.apply(i.ctx,e);return}};var d={listenTo:"on",listenToOnce:"once"};i.each(d,function(t,e){u[e]=function(e,r,s){var n=this._listeningTo||(this._listeningTo={});var a=e._listenId||(e._listenId=i.uniqueId("l"));n[a]=e;if(!s&&typeof r==="object")s=this;e[t](r,s,this);return this}});u.bind=u.on;u.unbind=u.off;i.extend(e,u);var p=e.Model=function(t,e){var r=t||{};e||(e={});this.cid=i.uniqueId("c");this.attributes={};if(e.collection)this.collection=e.collection;if(e.parse)r=this.parse(r,e)||{};r=i.defaults({},r,i.result(this,"defaults"));this.set(r,e);this.changed={};this.initialize.apply(this,arguments)};i.extend(p.prototype,u,{changed:null,validationError:null,idAttribute:"id",initialize:function(){},toJSON:function(t){return i.clone(this.attributes)},sync:function(){return e.sync.apply(this,arguments)},get:function(t){return this.attributes[t]},escape:function(t){return i.escape(this.get(t))},has:function(t){return this.get(t)!=null},set:function(t,e,r){var s,n,a,o,h,u,l,c;if(t==null)return this;if(typeof t==="object"){n=t;r=e}else{(n={})[t]=e}r||(r={});if(!this._validate(n,r))return 
false;a=r.unset;h=r.silent;o=[];u=this._changing;this._changing=true;if(!u){this._previousAttributes=i.clone(this.attributes);this.changed={}}c=this.attributes,l=this._previousAttributes;if(this.idAttribute in n)this.id=n[this.idAttribute];for(s in n){e=n[s];if(!i.isEqual(c[s],e))o.push(s);if(!i.isEqual(l[s],e)){this.changed[s]=e}else{delete this.changed[s]}a?delete c[s]:c[s]=e}if(!h){if(o.length)this._pending=r;for(var f=0,d=o.length;f<d;f++){this.trigger("change:"+o[f],this,c[o[f]],r)}}if(u)return this;if(!h){while(this._pending){r=this._pending;this._pending=false;this.trigger("change",this,r)}}this._pending=false;this._changing=false;return this},unset:function(t,e){return this.set(t,void 0,i.extend({},e,{unset:true}))},clear:function(t){var e={};for(var r in this.attributes)e[r]=void 0;return this.set(e,i.extend({},t,{unset:true}))},hasChanged:function(t){if(t==null)return!i.isEmpty(this.changed);return i.has(this.changed,t)},changedAttributes:function(t){if(!t)return this.hasChanged()?i.clone(this.changed):false;var e,r=false;var s=this._changing?this._previousAttributes:this.attributes;for(var n in t){if(i.isEqual(s[n],e=t[n]))continue;(r||(r={}))[n]=e}return r},previous:function(t){if(t==null||!this._previousAttributes)return null;return this._previousAttributes[t]},previousAttributes:function(){return i.clone(this._previousAttributes)},fetch:function(t){t=t?i.clone(t):{};if(t.parse===void 0)t.parse=true;var e=this;var r=t.success;t.success=function(i){if(!e.set(e.parse(i,t),t))return false;if(r)r(e,i,t);e.trigger("sync",e,i,t)};q(this,t);return this.sync("read",this,t)},save:function(t,e,r){var s,n,a,o=this.attributes;if(t==null||typeof t==="object"){s=t;r=e}else{(s={})[t]=e}r=i.extend({validate:true},r);if(s&&!r.wait){if(!this.set(s,r))return false}else{if(!this._validate(s,r))return false}if(s&&r.wait){this.attributes=i.extend({},o,s)}if(r.parse===void 0)r.parse=true;var h=this;var u=r.success;r.success=function(t){h.attributes=o;var e=h.parse(t,r);if(r.wait)e=i.extend(s||{},e);if(i.isObject(e)&&!h.set(e,r)){return false}if(u)u(h,t,r);h.trigger("sync",h,t,r)};q(this,r);n=this.isNew()?"create":r.patch?"patch":"update";if(n==="patch")r.attrs=s;a=this.sync(n,this,r);if(s&&r.wait)this.attributes=o;return a},destroy:function(t){t=t?i.clone(t):{};var e=this;var r=t.success;var s=function(){e.trigger("destroy",e,e.collection,t)};t.success=function(i){if(t.wait||e.isNew())s();if(r)r(e,i,t);if(!e.isNew())e.trigger("sync",e,i,t)};if(this.isNew()){t.success();return false}q(this,t);var n=this.sync("delete",this,t);if(!t.wait)s();return n},url:function(){var t=i.result(this,"urlRoot")||i.result(this.collection,"url")||M();if(this.isNew())return t;return t.replace(/([^\/])$/,"$1/")+encodeURIComponent(this.id)},parse:function(t,e){return t},clone:function(){return new this.constructor(this.attributes)},isNew:function(){return!this.has(this.idAttribute)},isValid:function(t){return this._validate({},i.extend(t||{},{validate:true}))},_validate:function(t,e){if(!e.validate||!this.validate)return true;t=i.extend({},this.attributes,t);var r=this.validationError=this.validate(t,e)||null;if(!r)return true;this.trigger("invalid",this,r,i.extend(e,{validationError:r}));return false}});var v=["keys","values","pairs","invert","pick","omit"];i.each(v,function(t){p.prototype[t]=function(){var e=o.call(arguments);e.unshift(this.attributes);return i[t].apply(i,e)}});var g=e.Collection=function(t,e){e||(e={});if(e.model)this.model=e.model;if(e.comparator!==void 
0)this.comparator=e.comparator;this._reset();this.initialize.apply(this,arguments);if(t)this.reset(t,i.extend({silent:true},e))};var m={add:true,remove:true,merge:true};var y={add:true,remove:false};i.extend(g.prototype,u,{model:p,initialize:function(){},toJSON:function(t){return this.map(function(e){return e.toJSON(t)})},sync:function(){return e.sync.apply(this,arguments)},add:function(t,e){return this.set(t,i.extend({merge:false},e,y))},remove:function(t,e){var r=!i.isArray(t);t=r?[t]:i.clone(t);e||(e={});var s,n,a,o;for(s=0,n=t.length;s<n;s++){o=t[s]=this.get(t[s]);if(!o)continue;delete this._byId[o.id];delete this._byId[o.cid];a=this.indexOf(o);this.models.splice(a,1);this.length--;if(!e.silent){e.index=a;o.trigger("remove",o,this,e)}this._removeReference(o,e)}return r?t[0]:t},set:function(t,e){e=i.defaults({},e,m);if(e.parse)t=this.parse(t,e);var r=!i.isArray(t);t=r?t?[t]:[]:i.clone(t);var s,n,a,o,h,u,l;var c=e.at;var f=this.model;var d=this.comparator&&c==null&&e.sort!==false;var v=i.isString(this.comparator)?this.comparator:null;var g=[],y=[],_={};var b=e.add,w=e.merge,x=e.remove;var E=!d&&b&&x?[]:false;for(s=0,n=t.length;s<n;s++){h=t[s]||{};if(h instanceof p){a=o=h}else{a=h[f.prototype.idAttribute||"id"]}if(u=this.get(a)){if(x)_[u.cid]=true;if(w){h=h===o?o.attributes:h;if(e.parse)h=u.parse(h,e);u.set(h,e);if(d&&!l&&u.hasChanged(v))l=true}t[s]=u}else if(b){o=t[s]=this._prepareModel(h,e);if(!o)continue;g.push(o);this._addReference(o,e)}o=u||o;if(E&&(o.isNew()||!_[o.id]))E.push(o);_[o.id]=true}if(x){for(s=0,n=this.length;s<n;++s){if(!_[(o=this.models[s]).cid])y.push(o)}if(y.length)this.remove(y,e)}if(g.length||E&&E.length){if(d)l=true;this.length+=g.length;if(c!=null){for(s=0,n=g.length;s<n;s++){this.models.splice(c+s,0,g[s])}}else{if(E)this.models.length=0;var k=E||g;for(s=0,n=k.length;s<n;s++){this.models.push(k[s])}}}if(l)this.sort({silent:true});if(!e.silent){for(s=0,n=g.length;s<n;s++){(o=g[s]).trigger("add",o,this,e)}if(l||E&&E.length)this.trigger("sort",this,e)}return r?t[0]:t},reset:function(t,e){e||(e={});for(var r=0,s=this.models.length;r<s;r++){this._removeReference(this.models[r],e)}e.previousModels=this.models;this._reset();t=this.add(t,i.extend({silent:true},e));if(!e.silent)this.trigger("reset",this,e);return t},push:function(t,e){return this.add(t,i.extend({at:this.length},e))},pop:function(t){var e=this.at(this.length-1);this.remove(e,t);return e},unshift:function(t,e){return this.add(t,i.extend({at:0},e))},shift:function(t){var e=this.at(0);this.remove(e,t);return e},slice:function(){return o.apply(this.models,arguments)},get:function(t){if(t==null)return void 0;return this._byId[t]||this._byId[t.id]||this._byId[t.cid]},at:function(t){return this.models[t]},where:function(t,e){if(i.isEmpty(t))return e?void 0:[];return this[e?"find":"filter"](function(e){for(var i in t){if(t[i]!==e.get(i))return false}return true})},findWhere:function(t){return this.where(t,true)},sort:function(t){if(!this.comparator)throw new Error("Cannot sort a set without a comparator");t||(t={});if(i.isString(this.comparator)||this.comparator.length===1){this.models=this.sortBy(this.comparator,this)}else{this.models.sort(i.bind(this.comparator,this))}if(!t.silent)this.trigger("sort",this,t);return this},pluck:function(t){return i.invoke(this.models,"get",t)},fetch:function(t){t=t?i.clone(t):{};if(t.parse===void 0)t.parse=true;var e=t.success;var r=this;t.success=function(i){var s=t.reset?"reset":"set";r[s](i,t);if(e)e(r,i,t);r.trigger("sync",r,i,t)};q(this,t);return 
this.sync("read",this,t)},create:function(t,e){e=e?i.clone(e):{};if(!(t=this._prepareModel(t,e)))return false;if(!e.wait)this.add(t,e);var r=this;var s=e.success;e.success=function(t,i){if(e.wait)r.add(t,e);if(s)s(t,i,e)};t.save(null,e);return t},parse:function(t,e){return t},clone:function(){return new this.constructor(this.models)},_reset:function(){this.length=0;this.models=[];this._byId={}},_prepareModel:function(t,e){if(t instanceof p)return t;e=e?i.clone(e):{};e.collection=this;var r=new this.model(t,e);if(!r.validationError)return r;this.trigger("invalid",this,r.validationError,e);return false},_addReference:function(t,e){this._byId[t.cid]=t;if(t.id!=null)this._byId[t.id]=t;if(!t.collection)t.collection=this;t.on("all",this._onModelEvent,this)},_removeReference:function(t,e){if(this===t.collection)delete t.collection;t.off("all",this._onModelEvent,this)},_onModelEvent:function(t,e,i,r){if((t==="add"||t==="remove")&&i!==this)return;if(t==="destroy")this.remove(e,r);if(e&&t==="change:"+e.idAttribute){delete this._byId[e.previous(e.idAttribute)];if(e.id!=null)this._byId[e.id]=e}this.trigger.apply(this,arguments)}});var _=["forEach","each","map","collect","reduce","foldl","inject","reduceRight","foldr","find","detect","filter","select","reject","every","all","some","any","include","contains","invoke","max","min","toArray","size","first","head","take","initial","rest","tail","drop","last","without","difference","indexOf","shuffle","lastIndexOf","isEmpty","chain","sample"];i.each(_,function(t){g.prototype[t]=function(){var e=o.call(arguments);e.unshift(this.models);return i[t].apply(i,e)}});var b=["groupBy","countBy","sortBy","indexBy"];i.each(b,function(t){g.prototype[t]=function(e,r){var s=i.isFunction(e)?e:function(t){return t.get(e)};return i[t](this.models,s,r)}});var w=e.View=function(t){this.cid=i.uniqueId("view");t||(t={});i.extend(this,i.pick(t,E));this._ensureElement();this.initialize.apply(this,arguments);this.delegateEvents()};var x=/^(\S+)\s*(.*)$/;var E=["model","collection","el","id","attributes","className","tagName","events"];i.extend(w.prototype,u,{tagName:"div",$:function(t){return this.$el.find(t)},initialize:function(){},render:function(){return this},remove:function(){this.$el.remove();this.stopListening();return this},setElement:function(t,i){if(this.$el)this.undelegateEvents();this.$el=t instanceof e.$?t:e.$(t);this.el=this.$el[0];if(i!==false)this.delegateEvents();return this},delegateEvents:function(t){if(!(t||(t=i.result(this,"events"))))return this;this.undelegateEvents();for(var e in t){var r=t[e];if(!i.isFunction(r))r=this[t[e]];if(!r)continue;var s=e.match(x);var n=s[1],a=s[2];r=i.bind(r,this);n+=".delegateEvents"+this.cid;if(a===""){this.$el.on(n,r)}else{this.$el.on(n,a,r)}}return this},undelegateEvents:function(){this.$el.off(".delegateEvents"+this.cid);return this},_ensureElement:function(){if(!this.el){var t=i.extend({},i.result(this,"attributes"));if(this.id)t.id=i.result(this,"id");if(this.className)t["class"]=i.result(this,"className");var r=e.$("<"+i.result(this,"tagName")+">").attr(t);this.setElement(r,false)}else{this.setElement(i.result(this,"el"),false)}}});e.sync=function(t,r,s){var n=T[t];i.defaults(s||(s={}),{emulateHTTP:e.emulateHTTP,emulateJSON:e.emulateJSON});var 
a={type:n,dataType:"json"};if(!s.url){a.url=i.result(r,"url")||M()}if(s.data==null&&r&&(t==="create"||t==="update"||t==="patch")){a.contentType="application/json";a.data=JSON.stringify(s.attrs||r.toJSON(s))}if(s.emulateJSON){a.contentType="application/x-www-form-urlencoded";a.data=a.data?{model:a.data}:{}}if(s.emulateHTTP&&(n==="PUT"||n==="DELETE"||n==="PATCH")){a.type="POST";if(s.emulateJSON)a.data._method=n;var o=s.beforeSend;s.beforeSend=function(t){t.setRequestHeader("X-HTTP-Method-Override",n);if(o)return o.apply(this,arguments)}}if(a.type!=="GET"&&!s.emulateJSON){a.processData=false}if(a.type==="PATCH"&&k){a.xhr=function(){return new ActiveXObject("Microsoft.XMLHTTP")}}var h=s.xhr=e.ajax(i.extend(a,s));r.trigger("request",r,h,s);return h};var k=typeof window!=="undefined"&&!!window.ActiveXObject&&!(window.XMLHttpRequest&&(new XMLHttpRequest).dispatchEvent);var T={create:"POST",update:"PUT",patch:"PATCH","delete":"DELETE",read:"GET"};e.ajax=function(){return e.$.ajax.apply(e.$,arguments)};var $=e.Router=function(t){t||(t={});if(t.routes)this.routes=t.routes;this._bindRoutes();this.initialize.apply(this,arguments)};var S=/\((.*?)\)/g;var H=/(\(\?)?:\w+/g;var A=/\*\w+/g;var I=/[\-{}\[\]+?.,\\\^$|#\s]/g;i.extend($.prototype,u,{initialize:function(){},route:function(t,r,s){if(!i.isRegExp(t))t=this._routeToRegExp(t);if(i.isFunction(r)){s=r;r=""}if(!s)s=this[r];var n=this;e.history.route(t,function(i){var a=n._extractParameters(t,i);n.execute(s,a);n.trigger.apply(n,["route:"+r].concat(a));n.trigger("route",r,a);e.history.trigger("route",n,r,a)});return this},execute:function(t,e){if(t)t.apply(this,e)},navigate:function(t,i){e.history.navigate(t,i);return this},_bindRoutes:function(){if(!this.routes)return;this.routes=i.result(this,"routes");var t,e=i.keys(this.routes);while((t=e.pop())!=null){this.route(t,this.routes[t])}},_routeToRegExp:function(t){t=t.replace(I,"\\$&").replace(S,"(?:$1)?").replace(H,function(t,e){return e?t:"([^/?]+)"}).replace(A,"([^?]*?)");return new RegExp("^"+t+"(?:\\?([\\s\\S]*))?$")},_extractParameters:function(t,e){var r=t.exec(e).slice(1);return i.map(r,function(t,e){if(e===r.length-1)return t||null;return t?decodeURIComponent(t):null})}});var N=e.History=function(){this.handlers=[];i.bindAll(this,"checkUrl");if(typeof window!=="undefined"){this.location=window.location;this.history=window.history}};var R=/^[#\/]|\s+$/g;var O=/^\/+|\/+$/g;var P=/msie [\w.]+/;var C=/\/$/;var j=/#.*$/;N.started=false;i.extend(N.prototype,u,{interval:50,atRoot:function(){return this.location.pathname.replace(/[^\/]$/,"$&/")===this.root},getHash:function(t){var e=(t||this).location.href.match(/#(.*)$/);return e?e[1]:""},getFragment:function(t,e){if(t==null){if(this._hasPushState||!this._wantsHashChange||e){t=decodeURI(this.location.pathname+this.location.search);var i=this.root.replace(C,"");if(!t.indexOf(i))t=t.slice(i.length)}else{t=this.getHash()}}return t.replace(R,"")},start:function(t){if(N.started)throw new Error("Backbone.history has already been started");N.started=true;this.options=i.extend({root:"/"},this.options,t);this.root=this.options.root;this._wantsHashChange=this.options.hashChange!==false;this._wantsPushState=!!this.options.pushState;this._hasPushState=!!(this.options.pushState&&this.history&&this.history.pushState);var r=this.getFragment();var s=document.documentMode;var n=P.exec(navigator.userAgent.toLowerCase())&&(!s||s<=7);this.root=("/"+this.root+"/").replace(O,"/");if(n&&this._wantsHashChange){var a=e.$('<iframe src="javascript:0" 
tabindex="-1">');this.iframe=a.hide().appendTo("body")[0].contentWindow;this.navigate(r)}if(this._hasPushState){e.$(window).on("popstate",this.checkUrl)}else if(this._wantsHashChange&&"onhashchange"in window&&!n){e.$(window).on("hashchange",this.checkUrl)}else if(this._wantsHashChange){this._checkUrlInterval=setInterval(this.checkUrl,this.interval)}this.fragment=r;var o=this.location;if(this._wantsHashChange&&this._wantsPushState){if(!this._hasPushState&&!this.atRoot()){this.fragment=this.getFragment(null,true);this.location.replace(this.root+"#"+this.fragment);return true}else if(this._hasPushState&&this.atRoot()&&o.hash){this.fragment=this.getHash().replace(R,"");this.history.replaceState({},document.title,this.root+this.fragment)}}if(!this.options.silent)return this.loadUrl()},stop:function(){e.$(window).off("popstate",this.checkUrl).off("hashchange",this.checkUrl);if(this._checkUrlInterval)clearInterval(this._checkUrlInterval);N.started=false},route:function(t,e){this.handlers.unshift({route:t,callback:e})},checkUrl:function(t){var e=this.getFragment();if(e===this.fragment&&this.iframe){e=this.getFragment(this.getHash(this.iframe))}if(e===this.fragment)return false;if(this.iframe)this.navigate(e);this.loadUrl()},loadUrl:function(t){t=this.fragment=this.getFragment(t);return i.any(this.handlers,function(e){if(e.route.test(t)){e.callback(t);return true}})},navigate:function(t,e){if(!N.started)return false;if(!e||e===true)e={trigger:!!e};var i=this.root+(t=this.getFragment(t||""));t=t.replace(j,"");if(this.fragment===t)return;this.fragment=t;if(t===""&&i!=="/")i=i.slice(0,-1);if(this._hasPushState){this.history[e.replace?"replaceState":"pushState"]({},document.title,i)}else if(this._wantsHashChange){this._updateHash(this.location,t,e.replace);if(this.iframe&&t!==this.getFragment(this.getHash(this.iframe))){if(!e.replace)this.iframe.document.open().close();this._updateHash(this.iframe.location,t,e.replace)}}else{return this.location.assign(i)}if(e.trigger)return this.loadUrl(t)},_updateHash:function(t,e,i){if(i){var r=t.href.replace(/(javascript:|#).*$/,"");t.replace(r+"#"+e)}else{t.hash="#"+e}}});e.history=new N;var U=function(t,e){var r=this;var s;if(t&&i.has(t,"constructor")){s=t.constructor}else{s=function(){return r.apply(this,arguments)}}i.extend(s,r,e);var n=function(){this.constructor=s};n.prototype=r.prototype;s.prototype=new n;if(t)i.extend(s.prototype,t);s.__super__=r.prototype;return s};p.extend=g.extend=$.extend=w.extend=N.extend=U;var M=function(){throw new Error('A "url" property or function must be specified')};var q=function(t,e){var i=e.error;e.error=function(r){if(i)i(t,r,e);t.trigger("error",t,r,e)}};return e});
// From http://stackoverflow.com/a/19431552
// Compatibility override - Backbone 1.1 got rid of the 'options' binding
// automatically to views in the constructor - we need to keep that.
Backbone.View = (function(View) {
return View.extend({
constructor: function(options) {
this.options = options || {};
View.apply(this, arguments);
}
});
})(Backbone.View); | PypiClean |
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/mode/mirc/mirc.js |
//mIRC mode by Ford_Lawnmower :: Based on Velocity mode by Steve O'Hara
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMIME("text/mirc", "mirc");
CodeMirror.defineMode("mirc", function() {
function parseWords(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var specials = parseWords("$! $$ $& $? $+ $abook $abs $active $activecid " +
"$activewid $address $addtok $agent $agentname $agentstat $agentver " +
"$alias $and $anick $ansi2mirc $aop $appactive $appstate $asc $asctime " +
"$asin $atan $avoice $away $awaymsg $awaytime $banmask $base $bfind " +
"$binoff $biton $bnick $bvar $bytes $calc $cb $cd $ceil $chan $chanmodes " +
"$chantypes $chat $chr $cid $clevel $click $cmdbox $cmdline $cnick $color " +
"$com $comcall $comchan $comerr $compact $compress $comval $cos $count " +
"$cr $crc $creq $crlf $ctime $ctimer $ctrlenter $date $day $daylight " +
"$dbuh $dbuw $dccignore $dccport $dde $ddename $debug $decode $decompress " +
"$deltok $devent $dialog $did $didreg $didtok $didwm $disk $dlevel $dll " +
"$dllcall $dname $dns $duration $ebeeps $editbox $emailaddr $encode $error " +
"$eval $event $exist $feof $ferr $fgetc $file $filename $filtered $finddir " +
"$finddirn $findfile $findfilen $findtok $fline $floor $fopen $fread $fserve " +
"$fulladdress $fulldate $fullname $fullscreen $get $getdir $getdot $gettok $gmt " +
"$group $halted $hash $height $hfind $hget $highlight $hnick $hotline " +
"$hotlinepos $ial $ialchan $ibl $idle $iel $ifmatch $ignore $iif $iil " +
"$inelipse $ini $inmidi $inpaste $inpoly $input $inrect $inroundrect " +
"$insong $instok $int $inwave $ip $isalias $isbit $isdde $isdir $isfile " +
"$isid $islower $istok $isupper $keychar $keyrpt $keyval $knick $lactive " +
"$lactivecid $lactivewid $left $len $level $lf $line $lines $link $lock " +
"$lock $locked $log $logstamp $logstampfmt $longfn $longip $lower $ltimer " +
"$maddress $mask $matchkey $matchtok $md5 $me $menu $menubar $menucontext " +
"$menutype $mid $middir $mircdir $mircexe $mircini $mklogfn $mnick $mode " +
"$modefirst $modelast $modespl $mouse $msfile $network $newnick $nick $nofile " +
"$nopath $noqt $not $notags $notify $null $numeric $numok $oline $onpoly " +
"$opnick $or $ord $os $passivedcc $pic $play $pnick $port $portable $portfree " +
"$pos $prefix $prop $protect $puttok $qt $query $rand $r $rawmsg $read $readomo " +
"$readn $regex $regml $regsub $regsubex $remove $remtok $replace $replacex " +
"$reptok $result $rgb $right $round $scid $scon $script $scriptdir $scriptline " +
"$sdir $send $server $serverip $sfile $sha1 $shortfn $show $signal $sin " +
"$site $sline $snick $snicks $snotify $sock $sockbr $sockerr $sockname " +
"$sorttok $sound $sqrt $ssl $sreq $sslready $status $strip $str $stripped " +
"$syle $submenu $switchbar $tan $target $ticks $time $timer $timestamp " +
"$timestampfmt $timezone $tip $titlebar $toolbar $treebar $trust $ulevel " +
"$ulist $upper $uptime $url $usermode $v1 $v2 $var $vcmd $vcmdstat $vcmdver " +
"$version $vnick $vol $wid $width $wildsite $wildtok $window $wrap $xor");
var keywords = parseWords("abook ajinvite alias aline ame amsg anick aop auser autojoin avoice " +
"away background ban bcopy beep bread break breplace bset btrunc bunset bwrite " +
"channel clear clearall cline clipboard close cnick color comclose comopen " +
"comreg continue copy creq ctcpreply ctcps dcc dccserver dde ddeserver " +
"debug dec describe dialog did didtok disable disconnect dlevel dline dll " +
"dns dqwindow drawcopy drawdot drawfill drawline drawpic drawrect drawreplace " +
"drawrot drawsave drawscroll drawtext ebeeps echo editbox emailaddr enable " +
"events exit fclose filter findtext finger firewall flash flist flood flush " +
"flushini font fopen fseek fsend fserve fullname fwrite ghide gload gmove " +
"gopts goto gplay gpoint gqreq groups gshow gsize gstop gtalk gunload hadd " +
"halt haltdef hdec hdel help hfree hinc hload hmake hop hsave ial ialclear " +
"ialmark identd if ignore iline inc invite iuser join kick linesep links list " +
"load loadbuf localinfo log mdi me menubar mkdir mnick mode msg nick noop notice " +
"notify omsg onotice part partall pdcc perform play playctrl pop protect pvoice " +
"qme qmsg query queryn quit raw reload remini remote remove rename renwin " +
"reseterror resetidle return rlevel rline rmdir run ruser save savebuf saveini " +
"say scid scon server set showmirc signam sline sockaccept sockclose socklist " +
"socklisten sockmark sockopen sockpause sockread sockrename sockudp sockwrite " +
"sound speak splay sreq strip switchbar timer timestamp titlebar tnick tokenize " +
"toolbar topic tray treebar ulist unload unset unsetall updatenl url uwho " +
"var vcadd vcmd vcrem vol while whois window winhelp write writeint if isalnum " +
"isalpha isaop isavoice isban ischan ishop isignore isin isincs isletter islower " +
"isnotify isnum ison isop isprotect isreg isupper isvoice iswm iswmcs " +
"elseif else goto menu nicklist status title icon size option text edit " +
"button check radio box scroll list combo link tab item");
var functions = parseWords("if elseif else and not or eq ne in ni for foreach while switch");
var isOperatorChar = /[+\-*&%=<>!?^\/\|]/;
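// Make f the active tokenizer and hand it the current stream position.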
function chain(stream, state, f) {
state.tokenize = f;
return f(stream, state);
}
function tokenBase(stream, state) {
var beforeParams = state.beforeParams;
state.beforeParams = false;
var ch = stream.next();
if (/[\[\]{}\(\),\.]/.test(ch)) {
if (ch == "(" && beforeParams) state.inParams = true;
else if (ch == ")") state.inParams = false;
return null;
}
else if (/\d/.test(ch)) {
stream.eatWhile(/[\w\.]/);
return "number";
}
else if (ch == "\\") {
stream.eat("\\");
stream.eat(/./);
return "number";
}
else if (ch == "/" && stream.eat("*")) {
return chain(stream, state, tokenComment);
}
else if (ch == ";" && stream.match(/ *\( *\(/)) {
return chain(stream, state, tokenUnparsed);
}
else if (ch == ";" && !state.inParams) {
stream.skipToEnd();
return "comment";
}
else if (ch == '"') {
stream.eat(/"/);
return "keyword";
}
else if (ch == "$") {
stream.eatWhile(/[$_a-z0-9A-Z\.:]/);
if (specials && specials.propertyIsEnumerable(stream.current().toLowerCase())) {
return "keyword";
}
else {
state.beforeParams = true;
return "builtin";
}
}
else if (ch == "%") {
stream.eatWhile(/[^,^\s^\(^\)]/);
state.beforeParams = true;
return "string";
}
else if (isOperatorChar.test(ch)) {
stream.eatWhile(isOperatorChar);
return "operator";
}
else {
stream.eatWhile(/[\w\$_{}]/);
var word = stream.current().toLowerCase();
if (keywords && keywords.propertyIsEnumerable(word))
return "keyword";
if (functions && functions.propertyIsEnumerable(word)) {
state.beforeParams = true;
return "keyword";
}
return null;
}
}
function tokenComment(stream, state) {
var maybeEnd = false, ch;
while (ch = stream.next()) {
if (ch == "/" && maybeEnd) {
state.tokenize = tokenBase;
break;
}
maybeEnd = (ch == "*");
}
return "comment";
}
function tokenUnparsed(stream, state) {
var maybeEnd = 0, ch;
while (ch = stream.next()) {
if (ch == ";" && maybeEnd == 2) {
state.tokenize = tokenBase;
break;
}
if (ch == ")")
maybeEnd++;
else if (ch != " ")
maybeEnd = 0;
}
return "meta";
}
return {
startState: function() {
return {
tokenize: tokenBase,
beforeParams: false,
inParams: false
};
},
token: function(stream, state) {
if (stream.eatSpace()) return null;
return state.tokenize(stream, state);
}
};
});
}); | PypiClean |
/Choco-1.0.5.tar.gz/Choco-1.0.5/README.rst | =========================
Choco Templates for Python
=========================
Choco is a template library written in Python. It provides a familiar, non-XML
syntax which compiles into Python modules for maximum performance. Choco's
syntax and API borrows from the best ideas of many others, including Django
templates, Cheetah, Myghty, and Genshi. Conceptually, Choco is an embedded
Python (i.e. Python Server Page) language, which refines the familiar ideas
of componentized layout and inheritance to produce one of the most
straightforward and flexible models available, while also maintaining close
ties to Python calling and scoping semantics.
Nutshell
========
::
<%inherit file="base.html"/>
<%
rows = [[v for v in range(0,10)] for row in range(0,10)]
%>
<table>
% for row in rows:
${makerow(row)}
% endfor
</table>
<%def name="makerow(row)">
<tr>
% for name in row:
<td>${name}</td>\
% endfor
</tr>
</%def>
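A minimal sketch of rendering a template like the one above, assuming Choco keeps
Mako-style lookup semantics as the UI example below suggests. The ``choco.lookup``
import path, the ``template`` directory and the ``index.html`` filename are
illustrative assumptions rather than part of the example, and the inherited
``base.html`` would also have to live in the lookup directory:

.. code:: python

    from choco import lookup

    # Resolve templates relative to the "template" directory and render one of them.
    templates = lookup.TemplateLookup(directories=["template"])
    print(templates.get_template("index.html").render())
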
UI
============

The UI tag is inspired by Ember.js and Handlebars.js templates.

index.html
----------------
::

    This is an UI Page
    <%@ PostView(post_id)/>

ui/post.html
----------------
::

    This is a Post View
    Name: ${post.title}
    Content: ${post.content}

.. code:: python

    def create_ui_container():
        from choco.ui import UIContainer, UIModule
        ui_container = UIContainer(["template/ui"])

        class PostView(UIModule):
            default_template = "post.html"

            def initialize(self):
                self.thing = Thing("Post")

            def render(self, post_id):
                post = self.thing.getByPostId(post_id)
                return {
                    "post": post
                }

        ui_container.put_ui("PostView", PostView)
        return ui_container

    tl2 = lookup.TemplateLookup(directories=["template"], ui_container=create_ui_container())
    tl2.get_template("index.html").render(post_id=122)

Philosophy
===========

Python is a great scripting language. Don't reinvent the wheel... your templates can handle it!

License
========
Choco is licensed under an MIT-style license (see LICENSE).
Other incorporated projects may be licensed under different licenses.
All licenses allow for non-commercial and commercial use.
| PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/channel_version_response_py3.py |
from msrest.serialization import Model
class ChannelVersionResponse(Model):
"""ChannelVersionResponse.
:param channel_version_id: The channel version identifier
:type channel_version_id: int
:param multiplier: The channel multiplier
:type multiplier: float
:param unit:
:type unit: ~energycap.sdk.models.UnitChild
:param observation_rule:
:type observation_rule: ~energycap.sdk.models.ObservationRule
:param maximum_reading: The channel's max reading
:type maximum_reading: float
:param begin_date: Date this channel version started to be used
:type begin_date: datetime
:param end_date: Date this channel version stopped being used
:type end_date: datetime
:param udfs: List of user defined/custom fields and values for this
version
:type udfs: list[~energycap.sdk.models.UDFFieldChild]
"""
_attribute_map = {
'channel_version_id': {'key': 'channelVersionId', 'type': 'int'},
'multiplier': {'key': 'multiplier', 'type': 'float'},
'unit': {'key': 'unit', 'type': 'UnitChild'},
'observation_rule': {'key': 'observationRule', 'type': 'ObservationRule'},
'maximum_reading': {'key': 'maximumReading', 'type': 'float'},
'begin_date': {'key': 'beginDate', 'type': 'iso-8601'},
'end_date': {'key': 'endDate', 'type': 'iso-8601'},
'udfs': {'key': 'udfs', 'type': '[UDFFieldChild]'},
}
def __init__(self, *, channel_version_id: int=None, multiplier: float=None, unit=None, observation_rule=None, maximum_reading: float=None, begin_date=None, end_date=None, udfs=None, **kwargs) -> None:
super(ChannelVersionResponse, self).__init__(**kwargs)
self.channel_version_id = channel_version_id
self.multiplier = multiplier
self.unit = unit
self.observation_rule = observation_rule
self.maximum_reading = maximum_reading
self.begin_date = begin_date
self.end_date = end_date
self.udfs = udfs | PypiClean |
/Colr-0.9.1.tar.gz/Colr-0.9.1/colr/colr.py | from contextlib import suppress # type: ignore
from functools import partial
import ctypes
import math
import os
import struct
import sys
from types import GeneratorType
from typing import ( # noqa
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Set, # Set is used as a 'type: Set' comment in `get_known_codes()`.
Tuple,
Union,
cast,
)
from typing.io import IO
from .base import (
ChainedBase,
CodePart,
TextPart,
closing_code,
get_codes,
strip_codes,
)
from .codes import (
_stylemap,
_stylenums,
basic_names,
codeformat,
code_nums,
code_nums_reverse,
codes,
codes_reverse,
extbackformat,
extforeformat,
rgbbackformat,
rgbforeformat,
)
from .trans import (
ColorCode,
hex2rgb,
hex2term,
hex2termhex,
)
from .name_data import names as name_data
# Acceptable fore/back args.
ColorArg = Union[str, int, Tuple[int, int, int]]
# Acceptable format_* function args.
FormatArg = Union[int, Tuple[int, int, int]]
# (ColrCodePart/ColrTextPart).code_info() return type.
ColrChainedPartInfo = Tuple[
Optional[str],
Optional[
Union[str, int, Tuple[int, int, int]]
]
]
__all__ = [
'_disabled',
'auto_disable',
'closing_code',
'codeformat',
'code_nums',
'code_nums_reverse',
'codes',
'codes_reverse',
'color',
'Colr',
'disable',
'enable',
'extbackformat',
'extforeformat',
'format_back',
'format_fore',
'get_all_names',
'get_code_num',
'get_code_num_rgb',
'get_codes',
'get_known_codes',
'get_known_name',
'get_terminal_size',
'InvalidArg',
'InvalidColr',
'InvalidFormatArg',
'InvalidFormatColr',
'InvalidEscapeCode',
'InvalidRgbEscapeCode',
'InvalidStyle',
'name_data',
'parse_colr_arg',
'rgbbackformat',
'rgbforeformat',
'strip_codes',
]
# Set with the enable/disable functions.
_disabled = False
# Windows support relies on SetConsoleMode
# These boolean flags are for debugging.
has_windll = False
has_setconsolemode = False
try:
_kernel32 = ctypes.windll.kernel32 # noqa (attribute error during development)
has_windll = True
try:
_kernel32.SetConsoleMode(_kernel32.GetStdHandle(-11), 7)
has_setconsolemode = True
except Exception:
# Windows, but the new ansi code api is not supported.
# Library user will need to call colr.disable() to keep from spewing
# junk characters all over the screen.
pass
del _kernel32
except AttributeError:
# No windll, probably linux. If the system doesn't support ansi escape
# codes the library user will need to call colr.disable().
pass
def auto_disable(
enabled: Optional[bool] = True,
fds: Optional[Sequence[IO]] = (sys.stdout, sys.stderr)) -> None:
""" Automatically decide whether to disable color codes if stdout or
stderr are not ttys.
Arguments:
enabled : Whether to automatically disable color codes.
When set to True, the fds will be checked for ttys.
When set to False, enable() is called.
fds : Open file descriptors to check for ttys.
If any non-ttys are found, colors will be disabled.
Objects must have a isatty() method.
"""
if enabled:
allttys = all(
getattr(f, 'isatty', lambda: False)()
for f in cast(Sequence[IO], fds)
)
if not allttys:
disable()
else:
enable()
def disable() -> None:
""" Disable color codes for Colr and the convenience color() function.
Created to be used by auto_disable(), for piping output to file or
other commands.
"""
global _disabled
_disabled = True
def disabled() -> bool:
""" Public access to _disabled. """
return _disabled
def enable() -> None:
""" Enable color codes for Colr and the convenience color() function.
This only needs to be called if disable() was called previously.
"""
global _disabled
_disabled = False
def enabled() -> bool:
""" Public access to _disabled. """
return not _disabled
def _format_code(
number: FormatArg,
backcolor: Optional[bool] = False,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
""" Return an escape code for a fore/back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
format_fore/format_back wrap this function to reduce code duplication.
Arguments:
number : Integer or RGB tuple to format into an escape code.
backcolor : Whether this is for a back color, otherwise it's fore.
light : Whether this should be a 'light' color.
extended : Whether this should be an extended (256) color.
If `light` and `extended` are both given, only `light` is used.
"""
if backcolor:
codetype = 'back'
# A dict of codeformat funcs. These funcs return an escape code str.
formatters = {
'code': lambda n: codeformat(40 + n),
'lightcode': lambda n: codeformat(100 + n),
'ext': lambda n: extbackformat(n),
'rgb': lambda r, g, b: rgbbackformat(r, g, b),
} # type: Dict[str, Callable[..., str]]
else:
codetype = 'fore'
formatters = {
'code': lambda n: codeformat(30 + n),
'lightcode': lambda n: codeformat(90 + n),
'ext': lambda n: extforeformat(n),
'rgb': lambda r, g, b: rgbforeformat(r, g, b),
}
try:
r, g, b = (int(x) for x in number) # type: ignore
except (TypeError, ValueError):
# Not an rgb code.
# This variable, and it's cast is only to satisfy the type checks.
try:
n = int(cast(int, number))
except ValueError:
# Not an rgb code, or a valid code number.
raise InvalidColr(
number,
'Expecting RGB or 0-255 for {} code.'.format(codetype)
)
if light:
if not in_range(n, 0, 9):
raise InvalidColr(
n,
'Expecting 0-9 for light {} code.'.format(codetype)
)
return formatters['lightcode'](n)
elif extended:
if not in_range(n, 0, 255):
raise InvalidColr(
n,
'Expecting 0-255 for ext. {} code.'.format(codetype)
)
return formatters['ext'](n)
if not in_range(n, 0, 9):
raise InvalidColr(
n,
'Expecting 0-9 for {} code.'.format(codetype)
)
return formatters['code'](n)
# Rgb code.
try:
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidColr(
(r, g, b),
'RGB value for {} not in range 0-255.'.format(codetype)
)
except TypeError:
# Was probably a 3-char string. Not an rgb code though.
raise InvalidColr(
(r, g, b),
'RGB value for {} contains invalid number.'.format(codetype)
)
return formatters['rgb'](r, g, b)
def format_back(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
""" Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
"""
return _format_code(
number,
backcolor=True,
light=light,
extended=extended
)
def format_fore(
number: FormatArg,
light: Optional[bool] = False,
extended: Optional[bool] = False) -> str:
""" Return an escape code for a fore color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
"""
return _format_code(
number,
backcolor=False,
light=light,
extended=extended
)
def format_style(number: int) -> str:
""" Return an escape code for a style, by number.
This handles invalid style numbers.
"""
if str(number) not in _stylenums:
raise InvalidStyle(number)
return codeformat(number)
def get_all_names() -> Tuple[str, ...]:
""" Retrieve a tuple of all known color names, basic and 'known names'.
"""
names = list(basic_names)
names.extend(name_data)
return tuple(sorted(set(names)))
def get_code_num(s: str) -> Optional[int]:
""" Get code number from an escape code.
Raises InvalidEscapeCode if an invalid number is found.
"""
if ';' in s:
# Extended fore/back codes.
numberstr = s.rpartition(';')[-1][:-1]
else:
# Fore, back, style, codes.
numberstr = s.rpartition('[')[-1][:-1]
num = try_parse_int(
numberstr,
default=None,
minimum=0,
maximum=255
)
if num is None:
raise InvalidEscapeCode(numberstr)
return num
def get_code_num_rgb(s: str) -> Optional[Tuple[int, int, int]]:
""" Get rgb code numbers from an RGB escape code.
Raises InvalidRgbEscapeCode if an invalid number is found.
"""
parts = s.split(';')
if len(parts) != 5:
raise InvalidRgbEscapeCode(s, reason='Count is off.')
rgbparts = parts[-3:]
if not rgbparts[2].endswith('m'):
raise InvalidRgbEscapeCode(s, reason='Missing \'m\' on the end.')
rgbparts[2] = rgbparts[2].rstrip('m')
try:
r, g, b = [int(x) for x in rgbparts]
except ValueError as ex:
raise InvalidRgbEscapeCode(s) from ex
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidRgbEscapeCode(s, reason='Not in range 0-255.')
return r, g, b
def get_known_codes(
s: Union[str, 'Colr'],
unique: Optional[bool] = True,
rgb_mode: Optional[bool] = False):
""" Get all known escape codes from a string, and yield the explanations.
"""
isdisabled = disabled()
orderedcodes = tuple((c, get_known_name(c)) for c in get_codes(s))
codesdone = set() # type: Set[str]
for code, codeinfo in orderedcodes:
# Do the codes in order, but don't do the same code twice.
if unique:
if code in codesdone:
continue
codesdone.add(code)
if codeinfo is None:
continue
codetype, name = codeinfo
typedesc = '{:>13}: {!r:<23}'.format(codetype.title(), code)
if codetype.startswith(('extended', 'rgb')):
if isdisabled:
codedesc = str(ColorCode(name, rgb_mode=rgb_mode))
else:
codedesc = ColorCode(name, rgb_mode=rgb_mode).example()
else:
codedesc = ''.join((
code,
str(name).lstrip('(').rstrip(')'),
codes['style']['reset_all']
))
yield ' '.join((
typedesc,
codedesc
))
def get_known_name(s: str) -> Optional[Tuple[str, ColorArg]]:
""" Reverse translate a terminal code to a known color name, if possible.
Returns a tuple of (codetype, knownname) on success.
Returns None on failure.
"""
if not s.endswith('m'):
# All codes end with 'm', so...
return None
if s.startswith('\033[38;5;'):
# Extended fore.
name = codes_reverse['fore'].get(s, None)
if name is None:
num = cast(int, get_code_num(s))
return ('extended fore', num)
else:
return ('extended fore', name)
elif s.startswith('\033[48;5;'):
# Extended back.
name = codes_reverse['back'].get(s, None)
if name is None:
num = cast(int, get_code_num(s))
return ('extended back', num)
else:
return ('extended back', name)
elif s.startswith('\033[38;2'):
# RGB fore.
vals = get_code_num_rgb(s)
if vals is not None:
return ('rgb fore', vals)
elif s.startswith('\033[48;2'):
# RGB back.
vals = get_code_num_rgb(s)
if vals is not None:
return ('rgb back', vals)
elif s.startswith('\033['):
# Fore, back, style.
number = cast(int, get_code_num(s))
# Get code type based on number.
if (number <= 7) or (number == 22):
codetype = 'style'
elif (((number >= 30) and (number < 40)) or
((number >= 90) and (number < 100))):
codetype = 'fore'
elif (((number >= 40) and (number < 50)) or
((number >= 100) and (number < 110))):
codetype = 'back'
else:
raise InvalidEscapeCode(
number,
'Expecting 0-7, 22, 30-39, or 40-49 for escape code',
)
name = codes_reverse[codetype].get(s, None)
if name is not None:
return (codetype, name)
# Not a known escape code.
return None
def get_terminal_size(default=(80, 35)):
""" Return terminal (width, height) """
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
return cr
except (ImportError, EnvironmentError):
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except EnvironmentError:
pass
if not cr:
try:
cr = os.environ['LINES'], os.environ['COLUMNS']
except KeyError:
return default
return int(cr[1]), int(cr[0])
def in_range(x: int, minimum: int, maximum: int) -> bool:
""" Return True if x is >= minimum and <= maximum. """
return (x >= minimum and x <= maximum)
def parse_colr_arg(
s: str,
default: Optional[Any] = None,
rgb_mode: Optional[bool] = False) -> ColorArg:
""" Parse a user argument into a usable fore/back color value for Colr.
If a falsey value is passed, default is returned.
Raises InvalidColr if the argument is unusable.
Returns: A usable value for Colr(fore/back).
This validates basic/extended color names.
This validates the range for basic/extended values (0-255).
This validates the length/range for rgb values (0-255, 0-255, 0-255).
Arguments:
s : User's color value argument.
Example: "1", "255", "black", "25,25,25"
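        Illustrative results (editor's sketch of the logic below; the exact
        return values are assumptions, not from the original docs):
            parse_colr_arg('black')     -> 'black'
            parse_colr_arg('255')       -> 255
            parse_colr_arg('25,25,25')  -> (25, 25, 25)
            parse_colr_arg('')          -> default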
"""
if not s:
return cast(ColorArg, default)
val = s.strip().lower()
try:
# Try as int.
intval = int(val)
except ValueError:
# Try as rgb.
try:
r, g, b = (int(x.strip()) for x in val.split(','))
except ValueError:
if ',' in val:
# User tried rgb value and failed.
raise InvalidColr(val)
# Try as name (fore/back have the same names)
code = codes['fore'].get(val, None)
if code:
                # Valid basic code from fore, back, or style.
return val
# Not a basic code, try known names.
named_data = name_data.get(val, None)
if named_data is not None:
# A known named color.
return val
# Not a basic/extended/known name, try as hex.
try:
if rgb_mode:
return hex2rgb(val, allow_short=True)
return hex2termhex(val, allow_short=True)
except ValueError:
raise InvalidColr(val)
else:
# Got rgb. Do some validation.
if not all((in_range(x, 0, 255) for x in (r, g, b))):
raise InvalidColr(val)
# Valid rgb.
return r, g, b
else:
# Int value.
if not in_range(intval, 0, 255):
# May have been a hex value confused as an int.
if len(val) in (3, 6):
try:
if rgb_mode:
return hex2rgb(val, allow_short=True)
return hex2termhex(val, allow_short=True)
except ValueError:
raise InvalidColr(val)
raise InvalidColr(intval)
# Valid int value.
return intval
def try_parse_int(
s: str,
default: Optional[Any] = None,
minimum: Optional[int] = None,
maximum: Optional[int] = None) -> Optional[Any]:
""" Try parsing a string into an integer.
On failure, return `default`.
        If the number is less than `minimum` or greater than `maximum`,
return `default`.
Returns an integer on success.
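        Example (editor's illustration):
            try_parse_int('42')            -> 42
            try_parse_int('x', default=0)  -> 0
            try_parse_int('5', minimum=10) -> None (the default)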
"""
try:
n = int(s)
except ValueError:
return default
if (minimum is not None) and (n < minimum):
return default
elif (maximum is not None) and (n > maximum):
return default
return n
class Colr(ChainedBase):
""" This class colorizes text for an ansi terminal. """
# Known offsets for `Colr.rainbow` that will start with a certain color.
gradient_names = {
'green': 0,
'orange': 9,
'lightred': 15,
'magenta': 20,
'red': 80,
'yellow': 62,
'blue': 34,
'cyan': 48,
}
def __init__(
self,
text: Optional[str] = None,
fore: Optional[ColorArg] = None,
back: Optional[ColorArg] = None,
style: Optional[str] = None,
no_closing: Optional[bool] = False) -> None:
""" Initialize a Colr object with text and color options. """
# Can be initialized with colored text, not required though.
self.data = self.color(
text,
fore=fore,
back=back,
style=style,
no_closing=no_closing,
)
def __call__(self, text=None, fore=None, back=None, style=None):
""" Append text to this Colr object. """
self.data = ''.join((
self.data,
self.color(text=text, fore=fore, back=back, style=style)
))
return self
def __dir__(self):
""" Compute the fake method names, and include them in a listing
of attributes for autocompletion/inspection.
"""
def fmtcode(s):
try:
int(s)
return 'f_{}'.format(s)
except ValueError:
return s
def fmtbgcode(s):
try:
int(s)
return 'b_{}'.format(s)
except ValueError:
return 'bg{}'.format(s)
attrs = [fmtcode(k) for k in codes['fore']]
attrs.extend(fmtbgcode(k) for k in codes['back'])
attrs.extend(k for k in codes['style'])
attrs.extend((
'center',
'chained',
'color_code',
'color',
'data',
'format',
'gradient',
'join',
'ljust',
'print',
'rjust',
'str'
))
return attrs
def __format__(self, fmt):
""" Allow format specs to apply to self.data, such as <, >, and ^.
This adds a few color-specific features to the format_spec,
not found in the `ChainedBase` class.
Colr format spec example:
'{:[fore=COLOR, back=COLOR, style=STYLE]}'
..where COLOR is a stringified version of a valid color arg,
such as a known name, number, hex code, or RGB value (R;G;B).
RGB values should be separated with a semicolon, like:
'{x:[fore=255;255;255]}'.format(x=Colr('test'))
Also, `f`,`b`, and `s` are accepted for `fore`, `back`,
and `style`.
Example:
'Hello {x:[fore=red, back=white, style=bright]}'.format(
x=Colr('Test')
)
Note, if any conversion is done on the object beforehand
(using !s, !a, !r, and friends) this method is never called.
It only deals with the `format_spec` described in
`help('FORMATTING')`.
"""
if not fmt:
return str(self)
if not (('[' in fmt) and (']' in fmt)):
# No color specs found in the format.
return super().__format__(fmt)
# Has color specifications. Parse them out.
normalspec, _, spec = fmt.partition('[')
spec = spec.rstrip(']').strip().lower()
specargs = self._parse_colr_spec(spec)
try:
clr = Colr(str(self), **specargs)
except InvalidColr as ex:
raise InvalidFormatColr(spec, ex.value) from None
if normalspec:
# Apply non-color-specific format specs from ChainedBase.
return super(self.__class__, clr).__format__(normalspec)
return str(clr)
def __getattr__(self, attr):
""" If the attribute matches a fore, back, or style name,
return the color() function. Otherwise, return known
attributes and raise AttributeError for others.
"""
knownmethod = self._attr_to_method(attr)
if knownmethod is not None:
return knownmethod
try:
val = self.__getattribute__(attr)
except AttributeError as ex:
try:
val = self.data.__getattribute__(attr)
except AttributeError:
raise AttributeError(ex)
return val
def _attr_to_method(self, attr):
""" Return the correct color function by method name.
Uses `partial` to build kwargs on the `chained` func.
On failure/unknown name, returns None.
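            Example mappings (editor's illustration):
                'red'    -> partial(self.chained, fore='red')
                'bgblue' -> partial(self.chained, back='blue')
                'f_196'  -> partial(self.chained, fore=196)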
"""
if attr in codes['fore']:
# Fore method
return partial(self.chained, fore=attr)
elif attr in codes['style']:
# Style method
return partial(self.chained, style=attr)
elif attr.startswith('bg'):
# Back method
name = attr[2:].lstrip('_')
if name in codes['back']:
return partial(self.chained, back=name)
elif attr.startswith(('b256_', 'b_')):
# Back 256 method
# Remove the b256_ portion.
name = attr.partition('_')[2]
return self._ext_attr_to_partial(name, 'back')
elif attr.startswith(('f256_', 'f_')):
# Fore 256 method
name = attr.partition('_')[2]
return self._ext_attr_to_partial(name, 'fore')
return None
@classmethod
def _call_dunder_colr(cls, obj):
""" Call __colr__ on an object, after some checks.
If color is disabled, the object itself is returned.
If __colr__ doesn't return a Colr instance, TypeError is raised.
On success, a Colr instance is returned from obj.__colr__().
"""
if _disabled:
# No colorization when disabled. Just use str.
return obj
clr = obj.__colr__()
if not isinstance(clr, cls):
# __colr__ should always return a Colr.
# Future development may assume a Colr was returned.
raise TypeError(
' '.join((
'__colr__ methods should return a {} instance.',
'Got: {}',
)).format(
cls.__name__,
type(clr).__name__,
)
)
return clr
def _ext_attr_to_partial(self, name, kwarg_key):
""" Convert a string like '233' or 'aliceblue' into partial for
self.chained.
"""
try:
intval = int(name)
except ValueError:
# Try as an extended name_data name.
info = name_data.get(name, None)
if info is None:
# Not an int value or name_data name.
return None
kws = {kwarg_key: info['code']}
return partial(self.chained, **kws)
# Integer str passed, use the int value.
kws = {kwarg_key: intval}
return partial(self.chained, **kws)
def _gradient_black_line(
self, text, start, step=1,
fore=None, back=None, style=None, reverse=False, rgb_mode=False):
""" Yield colorized characters,
within the 24-length black gradient.
"""
if start < 232:
start = 232
elif start > 255:
start = 255
if reverse:
codes = list(range(start, 231, -1))
else:
codes = list(range(start, 256))
return ''.join((
self._iter_text_wave(
text,
codes,
step=step,
fore=fore,
back=back,
style=style,
rgb_mode=rgb_mode
)
))
def _gradient_black_lines(
self, text, start, step=1,
fore=None, back=None, style=None, reverse=False,
movefactor=2, rgb_mode=False):
""" Yield colorized characters,
within the 24-length black gradient,
treating each line separately.
"""
if not movefactor:
def factor(i):
return start
else:
# Increase the start for each line.
def factor(i):
return start + (i * movefactor)
return '\n'.join((
self._gradient_black_line(
line,
start=factor(i),
step=step,
fore=fore,
back=back,
style=style,
reverse=reverse,
rgb_mode=rgb_mode,
)
for i, line in enumerate(text.splitlines())
))
def _gradient_rgb_line(
self, text, start, stop, step=1,
fore=None, back=None, style=None):
""" Yield colorized characters, morphing from one rgb value to
another.
"""
return self._gradient_rgb_line_from_morph(
text,
list(self._morph_rgb(start, stop, step=step)),
fore=fore,
back=back,
style=style
)
def _gradient_rgb_line_from_morph(
self, text, morphlist, fore=None, back=None, style=None):
""" Yield colorized characters, morphing from one rgb value to
another.
"""
try:
listlen = len(morphlist)
except TypeError:
morphlist = list(morphlist)
listlen = len(morphlist)
neededsteps = listlen // len(text)
iterstep = 1
if neededsteps > iterstep:
# Skip some members of morphlist, to be sure to reach the end.
iterstep = neededsteps
usevals = morphlist
if iterstep > 1:
# Rebuild the morphlist, skipping some.
usevals = [usevals[i] for i in range(0, listlen, iterstep)]
return ''.join((
self._iter_text_wave(
text,
usevals,
fore=fore,
back=back,
style=style,
rgb_mode=False,
)
))
def _gradient_rgb_lines(
self, text, start, stop, step=1,
fore=None, back=None, style=None, movefactor=None):
""" Yield colorized characters, morphing from one rgb value to
another. This treats each line separately.
"""
morphlist = list(self._morph_rgb(start, stop, step=step))
if movefactor:
# Moving means we need the morph to wrap around.
morphlist.extend(self._morph_rgb(stop, start, step=step))
if movefactor < 0:
# Increase the start for each line.
def move():
popped = []
for _ in range(abs(movefactor)):
try:
popped.append(morphlist.pop(0))
except IndexError:
pass
morphlist.extend(popped)
return morphlist
else:
# Decrease start for each line.
def move():
for _ in range(movefactor):
try:
val = morphlist.pop(-1)
except IndexError:
pass
else:
morphlist.insert(0, val)
return morphlist
return '\n'.join((
self._gradient_rgb_line_from_morph(
line,
move() if movefactor else morphlist,
fore=fore,
back=back,
style=style,
)
for i, line in enumerate(text.splitlines())
))
def _iter_text_wave(
self, text, numbers, step=1,
fore=None, back=None, style=None, rgb_mode=False):
""" Yield colorized characters from `text`, using a wave of `numbers`.
Arguments:
text : String to be colorized.
numbers : A list/tuple of numbers (256 colors).
step : Number of characters to colorize per color.
fore : Fore color to use (name or number).
(Back will be gradient)
back : Background color to use (name or number).
(Fore will be gradient)
style : Style name to use.
rgb_mode : Use number for rgb value.
This should never be used when the numbers
are rgb values themselves.
"""
if fore and back:
raise ValueError('Both fore and back colors cannot be specified.')
pos = 0
end = len(text)
numbergen = self._iter_wave(numbers)
def make_color(n):
try:
r, g, b = n
except TypeError:
if rgb_mode:
return n, n, n
return n
return r, g, b
for value in numbergen:
lastchar = pos + step
yield self.color(
text[pos:lastchar],
fore=make_color(value) if fore is None else fore,
back=make_color(value) if fore is not None else back,
style=style
)
if lastchar >= end:
numbergen.send(True)
pos = lastchar
@staticmethod
def _iter_wave(iterable, count=0):
""" Move from beginning to end, and then end to beginning, a number of
iterations through an iterable (must accept len(iterable)).
Example:
print(' -> '.join(_iter_wave('ABCD', count=8)))
>> A -> B -> C -> D -> C -> B -> A -> B
If `count` is less than 1, this will run forever.
You can stop it by sending a Truthy value into the generator:
gen = self._iter_wave('test')
for c in gen:
if c == 's':
# Stop the generator early.
gen.send(True)
print(c)
"""
up = True
pos = 0
i = 0
try:
end = len(iterable)
except TypeError:
iterable = list(iterable)
end = len(iterable)
# Stop on count, or run forever.
while (i < count) if count > 0 else True:
try:
stop = yield iterable[pos]
# End of generator (user sent the stop signal)
if stop:
break
except IndexError:
# End of iterable, when len(iterable) is < count.
up = False
# Change directions if needed, otherwise increment/decrement.
if up:
pos += 1
if pos == end:
up = False
pos = end - 2
else:
pos -= 1
if pos < 0:
up = True
pos = 1
i += 1
def _morph_rgb(self, rgb1, rgb2, step=1):
""" Morph an rgb value into another, yielding each step along the way.
"""
pos1, pos2 = list(rgb1), list(rgb2)
indexes = [i for i, _ in enumerate(pos1)]
def step_value(a, b):
""" Returns the amount to add to `a` to make it closer to `b`,
multiplied by `step`.
"""
if a < b:
return step
if a > b:
return -step
return 0
steps = [step_value(pos1[x], pos2[x]) for x in indexes]
stepcnt = 0
while (pos1 != pos2):
stepcnt += 1
stop = yield tuple(pos1)
if stop:
break
for x in indexes:
if pos1[x] != pos2[x]:
pos1[x] += steps[x]
if (steps[x] < 0) and (pos1[x] < pos2[x]):
# Over stepped, negative.
pos1[x] = pos2[x]
if (steps[x] > 0) and (pos1[x] > pos2[x]):
# Over stepped, positive.
pos1[x] = pos2[x]
yield tuple(pos1)
def _parse_colr_spec(self, spec):
""" Parse a Colr spec such as 'fore=red, back=blue, style=bold' into
useable Colr keyword arguments.
            Raises InvalidFormatColr or InvalidFormatArg on error.
Returns a dict of {'fore': name, 'back': name, 'style': name} on
success.
Arguments:
spec : The format spec.
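            Example (editor's illustration):
                self._parse_colr_spec('fore=red, back=blue')
                # {'fore': 'red', 'back': 'blue'}
                self._parse_colr_spec('red, blue, bright')
                # {'fore': 'red', 'back': 'blue', 'style': 'bright'} (keyless)
                self._parse_colr_spec('fore=255;0;0')
                # {'fore': (255, 0, 0)}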
"""
        # Parsed fore, back, and style args (filled in as they are parsed).
specargs = {}
# Valid key names.
validkeys = ('fore', 'back', 'style')
# Shorter aliases to use with the spec keys.
aliases = {s[0]: s for s in validkeys}
# Stack of keys (in order of position) when not using spec-keys.
# They will be popped off as the values are parsed.
unused_keys = list(validkeys)
for kvpairstr in spec.split(','):
kvpairstr = kvpairstr.strip()
if not kvpairstr:
continue
try:
# Key=value style.
k, v = kvpairstr.split('=')
except ValueError:
# Keyless?
try:
k = unused_keys[0]
except IndexError:
# Too many commas/values.
raise InvalidFormatArg(
spec,
kvpairstr,
msg='Too many arguments/values.',
) from None
# Just a value was given, use the positional key for it.
v = kvpairstr
# Handle any aliases that were used.
k = aliases.get(k, k)
# Handle RGB values.
if v.count(';') in (2, 3):
try:
rgb = tuple(int(x) for x in v.split(';'))
except ValueError:
raise InvalidFormatColr(spec, v) from None
specargs[k] = rgb
else:
specargs[k] = v
try:
# Remove from possible keyless keys.
unused_keys.remove(k)
except ValueError:
# Already have all the args we need.
raise InvalidFormatArg(
spec,
k,
msg='Too many arguments/values.',
)
return specargs
def _rainbow_color(self, freq, i):
""" Calculate a single hexcode value for a piece of a rainbow.
Arguments:
freq : "Tightness" of colors (see self.rainbow())
i : Index of character in string to colorize.
"""
return '{:02x}{:02x}{:02x}'.format(*self._rainbow_rgb(freq, i))
def _rainbow_hex_chars(self, s, freq=0.1, spread=3.0, offset=0):
""" Iterate over characters in a string to build data needed for a
rainbow effect.
Yields tuples of (char, hexcode).
Arguments:
s : String to colorize.
freq : Frequency/"tightness" of colors in the rainbow.
Best results when in the range 0.0-1.0.
Default: 0.1
spread : Spread/width of colors.
Default: 3.0
offset : Offset for start of rainbow.
Default: 0
"""
return (
(c, self._rainbow_color(freq, offset + i / spread))
for i, c in enumerate(s)
)
def _rainbow_line(
self, text, freq=0.1, spread=3.0, offset=0,
rgb_mode=False, **colorargs):
""" Create rainbow using the same offset for all text.
Arguments:
text : String to colorize.
freq : Frequency/"tightness" of colors in the rainbow.
Best results when in the range 0.0-1.0.
Default: 0.1
spread : Spread/width of colors.
Default: 3.0
offset : Offset for start of rainbow.
Default: 0
rgb_mode : If truthy, use RGB escape codes instead of
extended 256 and approximate hex match.
Keyword Arguments:
colorargs : Any extra arguments for the color function,
such as fore, back, style.
These need to be treated carefully to not
'overwrite' the rainbow codes.
"""
fore = colorargs.get('fore', None)
back = colorargs.get('back', None)
style = colorargs.get('style', None)
if fore:
color_args = (lambda value: {
'back': value if rgb_mode else hex2term(value),
'style': style,
'fore': fore
})
else:
color_args = (lambda value: {
'fore': value if rgb_mode else hex2term(value),
'style': style,
'back': back
})
if rgb_mode:
method = self._rainbow_rgb_chars
else:
method = self._rainbow_hex_chars
return ''.join(
self.color(c, **color_args(hval))
for c, hval in method(
text,
freq=freq,
spread=spread,
offset=offset)
)
def _rainbow_lines(
self, text, freq=0.1, spread=3.0, offset=0, movefactor=0,
rgb_mode=False, **colorargs):
""" Create rainbow text, using the same offset for each line.
Arguments:
text : String to colorize.
freq : Frequency/"tightness" of colors in the rainbow.
Best results when in the range 0.0-1.0.
Default: 0.1
spread : Spread/width of colors.
Default: 3.0
offset : Offset for start of rainbow.
Default: 0
movefactor : Factor for offset increase on each new line.
Default: 0
rgb_mode : If truthy, use RGB escape codes instead of
extended 256 and approximate hex match.
Keyword Arguments:
fore, back, style : Other args for the color() function.
"""
if not movefactor:
def factor(i):
return offset
else:
# Increase the offset for each line.
def factor(i):
return offset + (i * movefactor)
return '\n'.join(
self._rainbow_line(
line,
freq=freq,
spread=spread,
offset=factor(i),
rgb_mode=rgb_mode,
**colorargs)
for i, line in enumerate(text.splitlines()))
def _rainbow_rgb(self, freq, i):
""" Calculate a single rgb value for a piece of a rainbow.
Arguments:
freq : "Tightness" of colors (see self.rainbow())
i : Index of character in string to colorize.
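            Example (editor's worked values for freq=0.1, i=0):
                red   = sin(0.0) * 127 + 128         -> 128
                green = sin(2 * pi / 3) * 127 + 128  -> 237 (truncated)
                blue  = sin(4 * pi / 3) * 127 + 128  -> 18  (truncated)
                Result: (128, 237, 18)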
"""
# Borrowed from lolcat, translated from ruby.
red = math.sin(freq * i + 0) * 127 + 128
green = math.sin(freq * i + 2 * math.pi / 3) * 127 + 128
blue = math.sin(freq * i + 4 * math.pi / 3) * 127 + 128
return int(red), int(green), int(blue)
def _rainbow_rgb_chars(self, s, freq=0.1, spread=3.0, offset=0):
""" Iterate over characters in a string to build data needed for a
rainbow effect.
Yields tuples of (char, (r, g, b)).
Arguments:
s : String to colorize.
freq : Frequency/"tightness" of colors in the rainbow.
Best results when in the range 0.0-1.0.
Default: 0.1
spread : Spread/width of colors.
Default: 3.0
offset : Offset for start of rainbow.
Default: 0
"""
return (
(c, self._rainbow_rgb(freq, offset + i / spread))
for i, c in enumerate(s)
)
def b_hex(self, value, text=None, fore=None, style=None, rgb_mode=False):
""" A chained method that sets the back color to an hex value.
Arguments:
value : Hex value to convert.
text : Text to style if not building up color codes.
fore : Fore color for the text.
style : Style for the text.
rgb_mode : If False, the closest extended code is used,
otherwise true color (rgb) mode is used.
"""
if rgb_mode:
try:
colrval = hex2rgb(value, allow_short=True)
except ValueError:
raise InvalidColr(value)
else:
try:
colrval = hex2term(value, allow_short=True)
except ValueError:
raise InvalidColr(value)
return self.chained(text=text, fore=fore, back=colrval, style=style)
def b_rgb(self, r, g, b, text=None, fore=None, style=None):
""" A chained method that sets the back color to an RGB value.
Arguments:
r : Red value.
g : Green value.
b : Blue value.
text : Text to style if not building up color codes.
fore : Fore color for the text.
style : Style for the text.
"""
return self.chained(text=text, fore=fore, back=(r, g, b), style=style)
def chained(self, text=None, fore=None, back=None, style=None):
""" Called by the various 'color' methods to colorize a single string.
The RESET_ALL code is appended to the string unless text is empty.
Raises InvalidColr on invalid color names.
Arguments:
text : String to colorize, or None for BG/Style change.
fore : Name of fore color to use.
back : Name of back color to use.
style : Name of style to use.
"""
self.data = ''.join((
self.data,
self.color(text=text, fore=fore, back=back, style=style),
))
return self
def color(
self, text=None, fore=None, back=None, style=None,
no_closing=False):
""" A method that colorizes strings, not Colr objects.
Raises InvalidColr for invalid color names.
The 'reset_all' code is appended if text is given.
"""
has_args = (
(fore is not None) or
(back is not None) or
(style is not None)
)
if hasattr(text, '__colr__') and not has_args:
# Use custom __colr__ method in the absence of arguments.
return str(self._call_dunder_colr(text))
# Stringify everything before operating on it.
text = str(text) if text is not None else ''
if _disabled:
return text
# Considered to have unclosed codes if embedded codes exist and
# the last code was not a color code.
embedded_codes = get_codes(text)
has_end_code = embedded_codes and embedded_codes[-1] == closing_code
# Add closing code if not already added, there is text, and
# some kind of color/style was used (whether from args, or
# color codes were included in the text already).
# If the last code embedded in the text was a closing code,
# then it is not added.
        # This can be overridden with `no_closing`.
needs_closing = (
text and
(not no_closing) and
(not has_end_code) and
(has_args or embedded_codes)
)
if needs_closing:
end = closing_code
else:
end = ''
return ''.join((
self.color_code(fore=fore, back=back, style=style),
text,
end,
))
def color_code(self, fore=None, back=None, style=None):
""" Return the codes for this style/colors. """
# Map from style type to raw code formatter function.
colorcodes = []
resetcodes = []
userstyles = {'style': style, 'back': back, 'fore': fore}
for stype in userstyles:
stylearg = userstyles.get(stype, None)
if not stylearg:
# No value for this style name, don't use it.
continue
# Get escape code for this style.
code = self.get_escape_code(stype, stylearg)
stylename = str(stylearg).lower()
if (stype == 'style') and (stylename in ('0', )):
resetcodes.append(code)
elif stylename.startswith('reset'):
resetcodes.append(code)
else:
colorcodes.append(code)
# Reset codes come first, to not override colors.
return ''.join((''.join(resetcodes), ''.join(colorcodes)))
def color_dummy(self, text=None, **kwargs):
""" A wrapper for str() that matches self.color().
For overriding when _auto_disable is used.
"""
return str(text) if text is not None else ''
def format(self, *args, **kwargs):
""" Like str.format, except it returns a Colr. """
return self.__class__(self.data.format(*args, **kwargs))
def get_escape_code(self, codetype, value):
""" Convert user arg to escape code. """
valuefmt = str(value).lower()
code = codes[codetype].get(valuefmt, None)
if code:
# Basic code from fore, back, or style.
return code
named_funcs = {
'fore': format_fore,
'back': format_back,
'style': format_style,
}
# Not a basic code, try known names.
converter = named_funcs.get(codetype, None)
if converter is None:
raise ValueError(
'Invalid code type. Expecting {}, got: {!r}'.format(
', '.join(named_funcs),
codetype
)
)
# Try as hex.
with suppress(ValueError):
value = int(hex2term(value, allow_short=True))
return converter(value, extended=True)
named_data = name_data.get(valuefmt, None)
if named_data is not None:
# A known named color.
try:
return converter(named_data['code'], extended=True)
except TypeError:
# Passing a known name as a style?
if codetype == 'style':
raise InvalidStyle(value)
raise
# Not a known color name/value, try rgb.
try:
r, g, b = (int(x) for x in value)
# This does not mean we have a 3 int tuple. It could be '111'.
# The converter should catch it though.
except (TypeError, ValueError):
# Not an rgb value.
if codetype == 'style':
raise InvalidStyle(value)
try:
escapecode = converter(value)
except ValueError as ex:
raise InvalidColr(value) from ex
return escapecode
def gradient(
self, text=None, name=None, fore=None, back=None, style=None,
freq=0.1, spread=None, linemode=True,
movefactor=2, rgb_mode=False):
""" Return a gradient by color name. Uses rainbow() underneath to
build the gradients, starting at a known offset.
Arguments:
text : Text to make gradient (self.data when not given).
The gradient text is joined to self.data when
this is used.
name : Color name for the gradient (same as fore names).
Default: black
fore : Fore color. Back will be gradient when used.
Default: None (fore is gradient)
back : Back color. Fore will be gradient when used.
Default: None (back=reset/normal)
style : Style for the gradient.
Default: None (reset/normal)
freq : Frequency of color change.
Higher means more colors.
Best when in the 0.0-1.0 range.
Default: 0.1
spread : Spread/width of each color (in characters).
Default: 3.0 for colors, 1 for black/white
linemode : Colorize each line in the input.
Default: True
movefactor : Factor for offset increase on each line when
using linemode.
Minimum value: 0
Default: 2
rgb_mode : Use true color (rgb) codes.
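            Example (editor's illustration):
                Colr().gradient('Hello world', name='blue')
                # 'Hello world' rendered with a rainbow gradient starting at
                # the known 'blue' offset (34).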
"""
try:
# Try explicit offset (passed in with `name`).
offset = int(name)
except (TypeError, ValueError):
name = name.lower().strip() if name else 'black'
# Black and white are separate methods.
if name == 'black':
return self.gradient_black(
text=text,
fore=fore,
back=back,
style=style,
step=int(spread) if spread else 1,
linemode=linemode,
movefactor=movefactor,
rgb_mode=rgb_mode
)
elif name == 'white':
return self.gradient_black(
text=text,
fore=fore,
back=back,
style=style,
step=int(spread) if spread else 1,
linemode=linemode,
movefactor=movefactor,
reverse=True,
rgb_mode=rgb_mode
)
try:
# Get rainbow offset from known name.
offset = self.gradient_names[name]
except KeyError:
raise ValueError('Unknown gradient name: {}'.format(name))
return self.rainbow(
text=text,
fore=fore,
back=back,
style=style,
offset=offset,
freq=freq,
spread=spread or 3.0,
linemode=linemode,
movefactor=movefactor,
rgb_mode=rgb_mode,
)
def gradient_black(
self, text=None, fore=None, back=None, style=None,
start=None, step=1, reverse=False,
linemode=True, movefactor=2, rgb_mode=False):
""" Return a black and white gradient.
Arguments:
            text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting 256-color number.
The `start` will be adjusted if it is not within
bounds.
This will always be > 15.
This will be adjusted to fit within a 6-length
gradient, or the 24-length black/white gradient.
step : Number of characters to colorize per color.
                This allows a "wider" gradient.
                This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Factor for offset increase on each line when
using linemode.
Minimum value: 0
Default: 2
rgb_mode : Use true color (rgb) method and codes.
"""
gradargs = {
'step': step,
'fore': fore,
'back': back,
'style': style,
'reverse': reverse,
'rgb_mode': rgb_mode,
}
if linemode:
gradargs['movefactor'] = 2 if movefactor is None else movefactor
method = self._gradient_black_lines
else:
method = self._gradient_black_line
if text:
return self.__class__(
''.join((
self.data or '',
method(
text,
start or (255 if reverse else 232),
**gradargs)
))
)
# Operating on self.data.
return self.__class__(
method(
self.stripped(),
start or (255 if reverse else 232),
**gradargs)
)
def gradient_rgb(
self, text=None, fore=None, back=None, style=None,
start=None, stop=None, step=1, linemode=True, movefactor=0):
""" Return a black and white gradient.
Arguments:
text : String to colorize.
fore : Foreground color, background will be gradient.
back : Background color, foreground will be gradient.
style : Name of style to use for the gradient.
start : Starting rgb value.
stop : Stopping rgb value.
step : Number of characters to colorize per color.
This allows a "wider" gradient.
This will always be greater than 0.
linemode : Colorize each line in the input.
Default: True
movefactor : Amount to shift gradient for each line when
`linemode` is set.
"""
gradargs = {
'step': step,
'fore': fore,
'back': back,
'style': style,
}
start = start or (0, 0, 0)
stop = stop or (255, 255, 255)
if linemode:
method = self._gradient_rgb_lines
gradargs['movefactor'] = movefactor
else:
method = self._gradient_rgb_line
if text:
return self.__class__(
''.join((
self.data or '',
method(
text,
start,
stop,
**gradargs
),
))
)
# Operating on self.data.
return self.__class__(
method(
self.stripped(),
start,
stop,
**gradargs
)
)
def hex(self, value, text=None, back=None, style=None, rgb_mode=False):
""" A chained method that sets the fore color to an hex value.
Arguments:
value : Hex value to convert.
text : Text to style if not building up color codes.
back : Back color for the text.
style : Style for the text.
rgb_mode : If False, the closest extended code is used,
otherwise true color (rgb) mode is used.
"""
if rgb_mode:
try:
colrval = hex2rgb(value, allow_short=True)
except ValueError:
raise InvalidColr(value)
else:
try:
colrval = hex2term(value, allow_short=True)
except ValueError:
raise InvalidColr(value)
return self.chained(text=text, fore=colrval, back=back, style=style)
def iter_parts(self, text=None):
""" Iterate over ColrCodeParts and TextParts, in the order
they are discovered from `self.data`.
This overrides the `ChainedBase.iter_parts` to yield
`ColrCodeParts` instead of `CodeParts`. They contain more info
about the codes, like code type and color name/value.
"""
for part in super().iter_parts(text=text):
if isinstance(part, CodePart):
# ColrCodePart will add some info about the code.
yield ColrCodePart.from_codepart(part)
else:
# Compatible attributes with ColrCodePart.
yield ColrTextPart.from_textpart(part)
def join(self, *colrs, **colorkwargs):
""" Like str.join, except it returns a Colr.
Arguments:
colrs : One or more Colrs. If a list or tuple is passed as an
argument it will be flattened.
Keyword Arguments:
fore, back, style...
see color().
"""
flat = []
for clr in colrs:
if isinstance(clr, (list, tuple, GeneratorType)):
# Flatten any lists, at least once.
flat.extend(str(c) for c in clr)
else:
flat.append(str(clr))
if colorkwargs:
fore = colorkwargs.get('fore', None)
back = colorkwargs.get('back', None)
style = colorkwargs.get('style', None)
flat = (
self.color(s, fore=fore, back=back, style=style)
for s in flat
)
return self.__class__(self.data.join(flat))
def lstrip(self, chars=None):
""" Like str.lstrip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('lstrip', chars),
no_closing=chars and (closing_code in chars),
)
def print(self, *args, **kwargs):
""" Chainable print method. Prints self.data and then clears it. """
print(self, *args, **kwargs)
self.data = ''
return self
def rainbow(
self, text=None, fore=None, back=None, style=None,
freq=0.1, offset=30, spread=3.0,
linemode=True, movefactor=2, rgb_mode=False):
""" Make rainbow gradient text.
Arguments:
text : Text to make gradient.
Default: self.data
fore : Fore color to use (makes back the rainbow).
Default: None
back : Back color to use (makes fore the rainbow).
Default: None
style : Style for the rainbow.
Default: None
freq : Frequency of color change, a higher value means
more colors.
Best results when in the range 0.0-1.0.
Default: 0.1
offset : Offset for start of rainbow.
Default: 30
spread : Spread/width of each color.
Default: 3.0,
linemode : Colorize each line in the input.
Default: True
movefactor : Factor for offset increase on each line when
using linemode.
Minimum value: 0
Default: 2
rgb_mode : Use RGB escape codes instead of extended 256 and
approximate hex matches.
"""
if fore and back:
raise ValueError('Cannot use both fore and back with rainbow()')
rainbowargs = {
'freq': freq,
'spread': spread,
'offset': offset,
'fore': fore,
'back': back,
'style': style,
'rgb_mode': rgb_mode,
}
if linemode:
rainbowargs['movefactor'] = movefactor
method = self._rainbow_lines
else:
method = self._rainbow_line
if text:
# Prepend existing self.data to the rainbow text.
return self.__class__(
''.join((
self.data,
method(text, **rainbowargs)
))
)
# Operate on self.data.
return self.__class__(
method(self.stripped(), **rainbowargs)
)
def rgb(self, r, g, b, text=None, back=None, style=None):
""" A chained method that sets the fore color to an RGB value.
Arguments:
r : Red value.
g : Green value.
b : Blue value.
text : Text to style if not building up color codes.
back : Back color for the text.
style : Style for the text.
"""
return self.chained(text=text, fore=(r, g, b), back=back, style=style)
def rstrip(self, chars=None):
""" Like str.rstrip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('rstrip', chars),
no_closing=chars and (closing_code in chars),
)
def strip(self, chars=None):
""" Like str.strip, except it returns the Colr instance. """
return self.__class__(
self._str_strip('strip', chars),
no_closing=chars and (closing_code in chars),
)
class ColrCodePart(CodePart):
""" A CodePart(ChainedPart) from base.py that adds more info about the
color code, like the code type (fore, back, style), and a known
color name.
"""
def __init__(self, originstr, start=None, stop=None):
super().__init__(originstr, start=start, stop=stop)
self.code_type, self.code_name = self.code_info()
def code_info(self) -> ColrChainedPartInfo:
""" Find the code type and color name/value from self.data. """
if not self.data:
return (None, None)
known_info = get_known_name(self.data)
if known_info is None:
return (None, None)
return known_info
@classmethod
def from_codepart(cls, part: CodePart) -> 'ColrCodePart':
""" Copy the info from a CodePart, and return a new ColrCodePart. """
colrpart = cls('', start=part.start, stop=part.stop)
colrpart.data = part.data
colrpart.code_type, colrpart.code_name = colrpart.code_info()
return colrpart
class ColrTextPart(TextPart):
""" A TextPart(ChainedPart) from base.py that is compatible with
the ColrCodePart.
"""
def __init__(self, originstr, start=None, stop=None):
super().__init__(originstr, start=start, stop=stop)
self.code_type = None
self.code_name = None
def code_info(self) -> ColrChainedPartInfo:
return None, None
@classmethod
def from_textpart(cls, part: TextPart) -> 'ColrTextPart':
""" Copy the info from a TextPart, and return a new ColrTextPart. """
colrpart = cls('', start=part.start, stop=part.stop)
colrpart.data = part.data
return colrpart
class InvalidArg(ValueError):
""" A ValueError for when the user uses invalid arguments. """
default_label = 'Invalid argument'
default_format = '{label}: {value}'
def __init__(self, value, label=None):
self.label = label or self.default_label
self.value = value
def __colr__(self):
""" Allows Colr(InvalidArg()) with default styling. """
return self.as_colr()
def __str__(self):
return self.default_format.format(
label=self.label,
value=repr(self.value)
)
def as_colr(self, label_args=None, value_args=None):
""" Like __str__, except it returns a colorized Colr instance. """
label_args = label_args or {'fore': 'red'}
value_args = value_args or {'fore': 'blue', 'style': 'bright'}
return Colr(self.default_format.format(
label=Colr(self.label, **label_args),
value=Colr(repr(self.value), **value_args),
))
# TODO: Remove all of this dynamic stuff, and hard-code the format strings
# for each of the InvalidColr,InvalidFormatColr,InvalidFormatArg
# exceptions. They can still be custom-colored. They're not using
# inheritance very well anymore. They might as well be easier to read.
class InvalidColr(InvalidArg):
""" A ValueError for when user passes an invalid colr name, value, rgb.
"""
accepted_values = (
('hex', '[#]rgb/[#]rrggbb'),
('name', 'white/black/etc.'),
('rgb', '0-255, 0-255, 0-255'),
('value', '0-255'),
) # type: Tuple[Tuple[str, ...], ...]
default_label = 'Expecting colr name/value:\n {types}'.format(
types=',\n '.join(
'{lbl:<5} ({val})'.format(lbl=l, val=v)
for l, v in accepted_values
)
)
default_format = '{label}\n Got: {value}'
def __colr__(self):
""" Like __str__, except it returns a colorized Colr instance. """
return self.as_colr()
def as_colr(
self, label_args=None, type_args=None, type_val_args=None,
value_args=None):
""" Like __str__, except it returns a colorized Colr instance. """
label_args = label_args or {'fore': 'red'}
type_args = type_args or {'fore': 'yellow'}
type_val_args = type_val_args or {'fore': 'grey'}
value_args = value_args or {'fore': 'blue', 'style': 'bright'}
return Colr(self.default_format.format(
label=Colr(':\n ').join(
Colr('Expecting color name/value', **label_args),
',\n '.join(
'{lbl:<5} ({val})'.format(
lbl=Colr(l, **type_args),
val=Colr(v, **type_val_args),
)
for l, v in self.accepted_values
)
),
value=Colr(repr(self.value), **value_args)
))
class InvalidFormatColr(InvalidColr):
""" A ValueError for when user passes an invalid colr name, value, rgb
for a Colr.__format__ spec.
"""
accepted_values = (
('hex', '[#]rgb/[#]rrggbb'),
('name', 'white/black/etc.'),
('rgb', '0-255; 0-255; 0-255'),
('value', '0-255'),
) # type: Tuple[Tuple[str, ...], ...]
default_msg = 'Bad format spec. color name/value.'
default_label = (
'{{msg}} Expecting:\n {types}'.format(
types=',\n '.join(
'{lbl:<5} ({val})'.format(lbl=l, val=v)
for l, v in accepted_values
)
)
)
default_format = '{label}\nGot: {value}\nIn spec: {spec}'
def __init__(self, spec, value, msg=None):
super().__init__(value)
self.spec = spec
self.msg = msg
def __colr__(self):
return self.as_colr()
def __str__(self):
return self.default_format.format(
label=self.label.format(msg=self.msg or self.default_msg),
value=repr(self.value),
spec=repr(self.spec),
)
def as_colr(
self, label_args=None, type_args=None, type_val_args=None,
value_args=None, spec_args=None):
""" Like __str__, except it returns a colorized Colr instance. """
label_args = label_args or {'fore': 'red'}
type_args = type_args or {'fore': 'yellow'}
type_val_args = type_val_args or {'fore': 'grey'}
value_args = value_args or {'fore': 'blue', 'style': 'bright'}
spec_args = spec_args or {'fore': 'blue'}
spec_repr = repr(self.spec)
spec_quote = spec_repr[0]
val_repr = repr(self.value)
val_quote = val_repr[0]
return Colr(self.default_format.format(
label=Colr(':\n ').join(
Colr(
'{} Expecting'.format(self.msg or self.default_msg),
**label_args
),
',\n '.join(
'{lbl:<5} ({val})'.format(
lbl=Colr(l, **type_args),
val=Colr(v, **type_val_args),
)
for l, v in self.accepted_values
)
),
spec=Colr('=').join(
Colr(v, **spec_args)
for v in spec_repr[1:-1].split('=')
).join((spec_quote, spec_quote)),
value=Colr(
val_repr[1:-1],
**value_args
).join((val_quote, val_quote)),
))
class InvalidFormatArg(InvalidFormatColr):
""" A ValueError for when user passes an invalid key/value
for a Colr.__format__ spec.
"""
accepted_keys = ('fore', 'back', 'style')
example_values = {s: '{}_arg'.format(s) for s in accepted_keys}
accepted_values = (
(
'keyed',
', '.join('{}={}'.format(k, v) for k, v in example_values.items())
),
('keyless', ', '.join('{}_arg'.format(s) for s in accepted_keys)),
)
default_msg = 'Bad format spec. argument.'
default_label = (
'{{msg}} Expecting:\n {types}'.format(
types=',\n '.join(
'{lbl:<8} [{val}]'.format(lbl=l, val=v)
for l, v in accepted_values
)
)
)
def as_colr(
self, label_args=None, type_args=None, type_val_args=None,
value_args=None, spec_args=None):
""" Like __str__, except it returns a colorized Colr instance. """
label_args = label_args or {'fore': 'red'}
type_args = type_args or {'fore': 'yellow'}
type_val_args = type_val_args or {'fore': 'dimgrey'}
value_args = value_args or {'fore': 'blue', 'style': 'bright'}
spec_args = spec_args or {'fore': 'blue'}
key_args = {'fore': 'blue'}
spec_repr = repr(self.spec)
spec_quote = spec_repr[0]
val_repr = repr(self.value)
val_quote = val_repr[0]
colr_vals = (
(
'keyed',
', '.join(
'{}={}'.format(
Colr(k, **key_args),
Colr(v, **type_val_args)
)
for k, v in self.example_values.items())
),
(
'keyless',
', '.join(
str(Colr('{}_arg'.format(s), **type_val_args))
for s in self.accepted_keys
)
),
)
return Colr(self.default_format.format(
label=Colr(':\n ').join(
Colr(
'{} Expecting'.format(self.msg or self.default_msg),
**label_args
),
',\n '.join(
'{lbl:<8} [{val}]'.format(
lbl=Colr(l, **type_args),
val=v,
)
for l, v in colr_vals
)
),
spec=Colr('=').join(
Colr(v, **spec_args)
for v in spec_repr[1:-1].split('=')
).join((spec_quote, spec_quote)),
value=Colr(
val_repr[1:-1],
**value_args
).join((val_quote, val_quote)),
))
class InvalidEscapeCode(InvalidArg):
""" A ValueError for when an invalid escape code is given. """
default_label = 'Expecting 0-255 for escape code value'
class InvalidRgbEscapeCode(InvalidEscapeCode):
""" A ValueError for when an invalid rgb escape code is given. """
default_label = 'Expecting 0-255;0-255;0-255 for RGB escape code value'
def __init__(self, value, label=None, reason=None):
super().__init__(value, label=label)
self.reason = reason
def __str__(self):
        s = super().__str__()
if self.reason:
s = '\n '.join((s, str(self.reason)))
return s
class InvalidStyle(InvalidColr):
default_label = 'Expecting style value:\n {styles}'.format(
styles='\n '.join(
', '.join(t[1])
for t in _stylemap
)
)
def as_colr(
self, label_args=None, type_args=None, value_args=None):
""" Like __str__, except it returns a colorized Colr instance. """
label_args = label_args or {'fore': 'red'}
type_args = type_args or {'fore': 'yellow'}
value_args = value_args or {'fore': 'blue', 'style': 'bright'}
return Colr(self.default_format.format(
label=Colr(':\n ').join(
Colr('Expecting style value', **label_args),
Colr(',\n ').join(
Colr(', ').join(
Colr(v, **type_args)
for v in t[1]
)
for t in _stylemap
)
),
value=Colr(repr(self.value), **value_args)
))
# Shortcuts.
color = Colr().color
if __name__ == '__main__':
if ('--auto-disable' in sys.argv) or ('-a' in sys.argv):
auto_disable()
print(
Colr('warning', 'red')
.join('[', ']', style='bright')(' ')
.green('This module is meant to be ran with `python -m colr`.')
) | PypiClean |
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/4.4/zlib.js | var zlib = {};
/**
* Returns a new [Deflate][] object with an [options][].
* @param options
* @returns {zlib.Deflate} a new Deflate object with an options
*/
zlib.createDeflate = function(options) {}
/**
* Returns a new [DeflateRaw][] object with an [options][].
* @param options
* @returns {zlib.DeflateRaw} a new DeflateRaw object with an options
*/
zlib.createDeflateRaw = function(options) {}
/**
* Returns a new [Gunzip][] object with an [options][].
* @param options
* @returns {zlib.Gunzip} a new Gunzip object with an options
*/
zlib.createGunzip = function(options) {}
/**
* Returns a new [Gzip][] object with an [options][].
* @param options
* @returns {zlib.Gzip} a new Gzip object with an options
*/
zlib.createGzip = function(options) {}
/**
* Returns a new [Inflate][] object with an [options][].
* @param options
* @returns {zlib.Inflate} a new Inflate object with an options
*/
zlib.createInflate = function(options) {}
/**
* Returns a new [InflateRaw][] object with an [options][].
* @param options
* @returns {zlib.InflateRaw} a new InflateRaw object with an options
*/
zlib.createInflateRaw = function(options) {}
/**
* Returns a new [Unzip][] object with an [options][].
* @param options
* @returns {zlib.Unzip} a new Unzip object with an options
*/
zlib.createUnzip = function(options) {}
/**
* Compress data using deflate.
* @constructor
*/
zlib.Deflate = function() {}
zlib.Deflate.prototype = new stream.ReadableStream();
zlib.Deflate.prototype = new stream.WritableStream();
/**
* Compress data using deflate, and do not append a zlib header.
* @constructor
*/
zlib.DeflateRaw = function() {}
zlib.DeflateRaw.prototype = new stream.ReadableStream();
zlib.DeflateRaw.prototype = new stream.WritableStream();
/**
* Decompress a gzip stream.
* @constructor
*/
zlib.Gunzip = function() {}
zlib.Gunzip.prototype = new stream.ReadableStream();
zlib.Gunzip.prototype = new stream.WritableStream();
/**
* Compress data using gzip.
* @constructor
*/
zlib.Gzip = function() {}
zlib.Gzip.prototype = new stream.ReadableStream();
zlib.Gzip.prototype = new stream.WritableStream();
/**
* Decompress a deflate stream.
* @constructor
*/
zlib.Inflate = function() {}
zlib.Inflate.prototype = new stream.ReadableStream();
zlib.Inflate.prototype = new stream.WritableStream();
/**
* Decompress a raw deflate stream.
* @constructor
*/
zlib.InflateRaw = function() {}
zlib.InflateRaw.prototype = new stream.ReadableStream();
zlib.InflateRaw.prototype = new stream.WritableStream();
/**
* Decompress either a Gzip- or Deflate-compressed stream by auto-detecting
* the header.
* @constructor
*/
zlib.Unzip = function() {}
zlib.Unzip.prototype = new stream.ReadableStream();
zlib.Unzip.prototype = new stream.WritableStream();
/**
* Not exported by the zlib module. It is documented here because it is the
* base class of the compressor/decompressor classes.
* @constructor
*/
zlib.Zlib = function() {}
/**
* kind defaults to zlib.Z_FULL_FLUSH.
* @param kind=zlib.Z_FULL_FLUSH
* @param callback
*/
zlib.Zlib.prototype.flush = function(kind, callback) {}
/**
* Dynamically update the compression level and compression strategy.
* @param level
* @param strategy
* @param callback
*/
zlib.Zlib.prototype.params = function(level, strategy, callback) {}
/**
* Reset the compressor/decompressor to factory defaults. Only applicable
* to the inflate and deflate algorithms.
*/
zlib.Zlib.prototype.reset = function() {}
/* constants */
zlib.Z_OK = 0;
zlib.Z_STREAM_END = 0;
zlib.Z_NEED_DICT = 0;
zlib.Z_ERRNO = 0;
zlib.Z_STREAM_ERROR = 0;
zlib.Z_DATA_ERROR = 0;
zlib.Z_MEM_ERROR = 0;
zlib.Z_BUF_ERROR = 0;
zlib.Z_VERSION_ERROR = 0;
zlib.Z_NO_COMPRESSION = 0;
zlib.Z_BEST_SPEED = 0;
zlib.Z_BEST_COMPRESSION = 0;
zlib.Z_DEFAULT_COMPRESSION = 0;
zlib.Z_FILTERED = 0;
zlib.Z_HUFFMAN_ONLY = 0;
zlib.Z_RLE = 0;
zlib.Z_FIXED = 0;
zlib.Z_DEFAULT_STRATEGY = 0;
zlib.Z_BINARY = 0;
zlib.Z_TEXT = 0;
zlib.Z_ASCII = 0;
zlib.Z_UNKNOWN = 0;
zlib.Z_DEFLATED = 0;
zlib.Z_NULL = 0;
var stream = require('stream');
exports = zlib; | PypiClean |
/Amplo-0.17.0.tar.gz/Amplo-0.17.0/amplo/pipeline.py |
from __future__ import annotations
import time
import warnings
from pathlib import Path
from typing import Any
from warnings import warn
import numpy as np
import numpy.typing as npt
import pandas as pd
from shap import TreeExplainer
from sklearn.metrics import get_scorer_names
from sklearn.model_selection import KFold, StratifiedKFold
from amplo.automl.data_processing import DataProcessor
from amplo.automl.feature_processing.feature_processing import (
FeatureProcessor,
get_required_columns,
translate_features,
)
from amplo.automl.grid_search import OptunaGridSearch
from amplo.automl.modelling import Modeller, get_model
from amplo.automl.standardization import Standardizer
from amplo.base import AmploObject, BaseEstimator
from amplo.base.objects import LoggingMixin, Result
from amplo.observation import DataObserver, ModelObserver
from amplo.utils import clean_feature_name, io, logging
from amplo.validation import ModelValidator
__all__ = ["Pipeline"]
warnings.filterwarnings("ignore", message="lbfgs failed to converge")
pd.options.mode.copy_on_write = True
class Pipeline(AmploObject, LoggingMixin):
"""
Automated Machine Learning Pipeline for tabular data.
The pipeline is designed for predictive maintenance application, failure
identification, failure prediction, condition monitoring, and more.
Parameters
----------
# Main parameters
main_dir : str, default: "Auto_ML/"
Main directory of pipeline
target : str, optional
Column name of the output variable.
name : str, default: "AutoML"
Name of the project
version : int, default: 1
Pipeline version. Will automatically increment when a version exists.
mode : {None, "classification", "regression"}, default: None
Pipeline mode.
objective : str, optional
Objective for training.
Default for classification: "neg_log_loss".
Default for regression: "mean_square_error".
verbose : int, default: 1
Verbosity of logging.
logging_to_file : bool, default: False
Whether to write logging to a file
    logging_path : str, optional
        Path to write logging to when ``logging_to_file`` is True.
        Defaults to "AutoML.log" inside ``main_dir``.
# Data processing
missing_values : {"remove", "interpolate", "mean", "zero"}, default: "zero"
How to treat missing values.
outlier_removal : {"clip", "boxplot", "z-score", "none"}, default: "clip"
How to treat outliers.
z_score_threshold : int, default: 4
When ``outlier_removal`` is "z-score", the threshold is adaptable.
include_output : bool, default: False
Whether to include output in the training data (sensible only with sequencing).
# Balancing
balance : bool, default: False
Whether to balance data.
# Feature processing
extract_features : bool, default: True
Whether to use the FeatureProcessing module to extract features.
information_threshold : float, default: 0.999
Threshold for removing collinear features.
feature_timeout : int, default: 3600
Time budget for feature processing.
use_wavelets : bool, default: False
Whether to use wavelet transforms (useful for frequency data)
# Modelling
standardize : bool, default: False
Whether to standardize the input/output data.
cv_shuffle : bool, default: True
Whether to shuffle the samples during cross-validation.
cv_splits : int, default: 10
How many cross-validation splits to make.
store_models : bool, default: False
Whether to store all trained model files.
# Grid search
grid_search_timeout : int, default: 3600
Time budget for grid search (in seconds).
    n_grid_searches : int, default: 2
Run grid search for the best `n_grid_searches` (model, feature set) pairs from
initial modelling.
n_trials_per_grid_search : int, default: 250
Maximal number of trials/candidates for each grid search.
# Other
kwargs: Any
Swallows all arguments that are not accepted. Warnings are raised if not empty.
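    Examples
    --------
    A minimal usage sketch (editor's illustration, not part of the original
    docstring). It assumes ``df`` is a pandas DataFrame that contains the
    target column named "target".
    >>> from amplo.pipeline import Pipeline
    >>> pipeline = Pipeline(target="target", mode="classification")
    >>> pipeline.fit(df)  # doctest: +SKIP
    >>> predictions = pipeline.predict(df)  # doctest: +SKIP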
"""
def __init__(
self,
# Main settings
main_dir: str = "Auto_ML/",
target: str = "target",
name: str = "AutoML",
version: int = 1,
mode: str | None = None,
objective: str | None = None,
verbose: int = 1,
logging_to_file: bool = False,
logging_path: str | None = None,
*,
# Data processing
missing_values: str = "zero",
outlier_removal: str = "clip",
z_score_threshold: int = 4,
include_output: bool = False,
# Balancing
balance: bool = False,
# Feature processing
extract_features: bool = True,
information_threshold: float = 0.999,
feature_timeout: int = 3600,
use_wavelets: bool = False,
# Modelling
standardize: bool = False,
cv_shuffle: bool = True,
cv_splits: int = 10,
store_models: bool = False,
# Grid search
grid_search_timeout: int = 3600,
n_grid_searches: int = 2,
n_trials_per_grid_search: int = 250,
# Other
**kwargs,
):
AmploObject.__init__(self)
# Initialize Logger
LoggingMixin.__init__(self, verbose=verbose)
if logging_path is None:
logging_path = f"{Path(main_dir)}/AutoML.log"
if logging_to_file:
logging.add_file_handler(logging_path)
# Input checks: validity
if mode not in (None, "regression", "classification"):
raise ValueError("Supported models: {'regression', 'classification', None}")
if not 0 < information_threshold < 1:
raise ValueError("Information threshold must be within (0, 1) interval.")
# Input checks: advices
if kwargs:
warn(f"Got unexpected keyword arguments that are not handled: {kwargs}")
# Main settings
self.metadata: dict[str, dict[str, Any]] = {}
self.main_dir = f"{Path(main_dir)}/" # assert '/' afterwards
self.target = target
self.name = name
self.version = version
self.mode = mode or ""
self.objective = objective or ""
self.logging_to_file = logging_to_file
self.logging_path = logging_path
self.verbose = verbose
# Data processing
self.missing_values = missing_values
self.outlier_removal = outlier_removal
self.z_score_threshold = z_score_threshold
self.include_output = include_output
# Balancing
self.balance = balance
# Feature processing
self.extract_features = extract_features
self.information_threshold = information_threshold
self.feature_timeout = feature_timeout
self.use_wavelets = use_wavelets
# Modelling
self.standardize = standardize
self.cv_shuffle = cv_shuffle
self.cv_splits = cv_splits
self.store_models = store_models
# Grid search
self.grid_search_timeout = grid_search_timeout
self.n_grid_searches = n_grid_searches
self.n_trials_per_grid_search = n_trials_per_grid_search
# Set version
self.version = version if version else 1
# Objective & Scorer
if self.objective and self.objective not in get_scorer_names():
raise ValueError(f"Invalid objective.\nPick from {get_scorer_names()}")
# Required sub-classes
self.data_processor: DataProcessor
self.feature_processor: FeatureProcessor
self.standardizer: Standardizer
self.data_observations: list[dict[str, str | bool]] = []
self.model_observations: list[dict[str, str | bool]] = []
# Instance initiating
self.best_model_: BaseEstimator
self.results_: list[Result] = []
self.is_fitted_ = False
self.validation: dict[str, Any] = {}
# Monitoring
self.file_delta_: dict[str, list[str]]
self._prediction_time_: float
self.main_predictors_: dict[str, float]
def fit(
self,
data: npt.NDArray[Any] | pd.DataFrame | str | Path,
target: npt.NDArray[Any] | pd.Series | str | None = None,
*,
metadata: dict[str, dict[str, Any]] | None = None,
model: str | None = None,
feature_set: str | None = None,
):
"""
Fit the full AutoML pipeline.
1. Prepare data for training
2. Train / optimize models
3. Prepare Production Files
Nicely organises all required scripts / files to make a prediction
Parameters
----------
        data : npt.NDArray[Any] or pd.DataFrame or str or Path
            Data or path to data. Propagated to `self._read_data`.
        target : npt.NDArray[Any] or pd.Series or str, optional
            Target data or column name. Propagated to `self._read_data`.
        *
        metadata : dict of {str : dict of {str : Any}}, optional
            Metadata. Propagated to `self._read_data`.
        model : str, optional
            Limits model training and grid search to a specific model.
        feature_set : str, optional
            Limits model training and grid search to a specific feature set.
"""
# Starting
self.logger.info(f"\n\n*** Starting Amplo AutoML - {self.name} ***\n\n")
# Reading data
data = self._read_data(data, target, metadata=metadata)
# Detect mode (classification / regression)
self._mode_detector(data)
self._set_subclasses()
# Preprocess Data
data = self.data_processor.fit_transform(data)
# Extract and select features
data = self.feature_processor.fit_transform(data, feature_set=feature_set)
# Standardize
if self.standardize:
data = self.standardizer.fit_transform(data)
# Model Training
################
for feature_set_, cols in self.feature_processor.feature_sets_.items():
if feature_set and feature_set_ != feature_set:
continue
self.logger.info(f"Fitting modeller on: {feature_set_}")
feature_data: pd.DataFrame = data[cols + [self.target]]
results_ = Modeller(
target=self.target,
mode=self.mode,
cv=self.cv,
objective=self.objective,
verbose=self.verbose,
feature_set=feature_set_,
model=model,
).fit(feature_data)
self.results_.extend(results_)
self.sort_results()
# Optimize Hyper parameters
for model_, feature_set in self.grid_search_iterations():
if feature_set not in self.feature_processor.feature_sets_:
raise ValueError(f"Found invalid feature set: '{feature_set}'")
self.logger.info(
f"Starting Hyper Parameter Optimization for {model_} on "
f"{feature_set} features ({len(data)} samples, "
f"{len(self.feature_processor.feature_sets_[feature_set])} features)"
)
results_ = OptunaGridSearch(
get_model(model_),
target=self.target,
timeout=self.grid_search_timeout,
feature_set=feature_set,
cv=self.cv,
n_trials=self.n_trials_per_grid_search,
scoring=self.objective,
verbose=self.verbose,
).fit(data)
self.results_.extend(results_)
self.sort_results()
self.train_val_best(data)
self.data_observations = DataObserver().observe(
data, self.mode, self.target, self.data_processor.dummies_
)
self.model_observations = ModelObserver().observe(
self.best_model_, data, self.target, self.mode
)
self.is_fitted_ = True
self.logger.info("All done :)")
logging.del_file_handlers()
def transform(self, data: pd.DataFrame) -> pd.DataFrame:
if not self.is_fitted_:
raise ValueError("Pipeline not yet fitted.")
# Process data
data = self.data_processor.transform(data)
# Convert Features
data = self.feature_processor.transform(
data, feature_set=self.best_feature_set_
)
# Standardize
if self.standardize:
data = self.standardizer.transform(data)
# Output
if not self.include_output and self.target in data:
data = data.drop(self.target, axis=1)
# Return
return data
def predict(self, data: pd.DataFrame) -> pd.Series:
"""
Full script to make predictions. Uses 'Production' folder with defined or
latest version.
Parameters
----------
data : pd.DataFrame
Data to do prediction on.
"""
start_time = time.time()
if not self.is_fitted_:
raise ValueError("Pipeline not yet fitted.")
self.logger.info(
f"Predicting with {type(self.best_model_).__name__}, v{self.version}"
)
# Convert
data = self.transform(data)
# Predict
predictions = self.best_model_.predict(data)
# Convert
if self.mode == "regression" and self.standardize:
predictions = self.standardizer.reverse(predictions, column=self.target)
elif self.mode == "classification":
predictions = self.data_processor.decode_labels(predictions)
# Stop timer
self._prediction_time_ = (time.time() - start_time) / len(data) * 1000
# Calculate main predictors
self._get_main_predictors(data)
return predictions
def predict_proba(self, data: pd.DataFrame) -> npt.NDArray[Any]:
"""
Returns probabilistic prediction, only for classification.
Parameters
----------
data : pd.DataFrame
Data to do prediction on.
"""
start_time = time.time()
if not self.is_fitted_:
raise ValueError("Pipeline not yet fitted.")
if self.mode != "classification":
raise ValueError("Predict_proba only available for classification")
if not hasattr(self.best_model_, "predict_proba"):
raise ValueError(
f"{type(self.best_model_).__name__} has no attribute predict_proba"
)
self.logger.info(
f"Predicting with {type(self.best_model_).__name__}, v{self.version}"
)
# Convert data
data = self.transform(data)
# Predict
prediction = self.best_model_.predict_proba(data)
# Stop timer
self._prediction_time_ = (time.time() - start_time) / len(data) * 1000
# Calculate main predictors
self._get_main_predictors(data)
return prediction
# Support functions
def grid_search_iterations(self) -> list[tuple[str, str]]:
"""Takes top `n_grid_searches` models / feature set combi's from results"""
return [
(self.results_[i].model, self.results_[i].feature_set)
for i in range(self.n_grid_searches)
]
def train_val_best(self, data: pd.DataFrame):
"""Arranges settings and parameter file."""
# Train model on all training data
best_model_ = get_model(self.best_model_str_)
best_model_.set_params(**self.best_params_)
best_model_.fit(data[self.best_features_], data[self.target])
self.best_model_ = best_model_
# Prune Data Processor
required_features = get_required_columns(
self.feature_processor.feature_sets_[self.best_feature_set_]
)
self.data_processor.prune_features(required_features)
# Set feature set
self.feature_processor.set_feature_set(self.best_feature_set_)
# Validation
self.validation = ModelValidator(
target=self.target,
cv=self.cv,
verbose=self.verbose,
).validate(model=best_model_, data=data, mode=self.mode)
def _read_data(
self,
data_or_path: npt.NDArray[Any] | pd.DataFrame | str | Path,
target: list[Any]
| tuple[Any]
| npt.NDArray[Any]
| pd.Series
| str
| Path
| None = None,
*,
metadata: dict[str, dict[str, Any]] | None = None,
) -> pd.DataFrame:
"""
Read and validate data.
Notes
-----
The required parameters depend on the input parameter types.
        When ``target`` is None, it falls back to ``self.target`` (by default "target").
When ``data_or_path`` is path-like, then the parameters ``target`` and
``metadata`` are not required.
Otherwise, when ``data_or_path`` is array-like, it either must contain a column
name as the ``target`` parameter indicates or ``target`` must also be an
array-like object with the same length as ``data_or_path``.
        Note: there are three accepted combinations of data_or_path and target
1. if data_or_path = pd.DataFrame, target = pd.Series | None | str
2. if data_or_path = npt.NDArray[Any], target = npt.NDArray[Any] | pd.Series
3. if data_or_path = path | str, target = path | str | None
Parameters
----------
data_or_path : npt.NDArray[Any] or pd.DataFrame or str or Path
Data or path to data.
target : npt.NDArray[Any] or pd.Series or str
Target data or column name or directory name
*
metadata : dict of {int : dict of {str : str or float}}, optional
Metadata.
Returns
-------
        pd.DataFrame
            The validated data, with the target column included.
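        Examples
        --------
        Illustrative sketches of the three accepted call patterns (the variable
        names are assumptions)::

            self._read_data(df, "label")              # DataFrame + target column name
            self._read_data(df, label_series)         # DataFrame + target Series
            self._read_data(X_array, y_array)         # ndarray data + array-like target
            self._read_data("data/folder", "label")   # path-like data + target name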
"""
self.logger.info("Reading data.")
# 1. if data_or_path = pd.DataFrame, target = ArrayLike | str | None
if isinstance(data_or_path, pd.DataFrame):
self.logger.debug("Detected pandas dataframe. Checking target.")
data = data_or_path
# If it's a series, we check index and take the name
if isinstance(target, pd.Series):
if not all(data.index == target.index):
warn(
"Indices of data and target don't match. Target index will be "
"overwritten by data index."
)
target.index = data.index
if target.name and self.target != target.name:
warn(
"Provided target series has a different name than initialized "
"target. Using series name."
)
self.target = str(target.name)
# Then for arraylike, we check length and make sure target is not in data
if isinstance(target, (list, tuple, pd.Series, np.ndarray)):
if len(data) != len(target):
raise ValueError("Length of target and data don't match.")
if self.target in data and (data[self.target] != target).any():
raise ValueError(
f"The column '{self.target}' column already exists in `data` "
f"but has different values."
)
data[self.target] = target
# If it's a string, we check its presence and update self.target
elif isinstance(target, str):
if target not in data:
raise ValueError("Provided target column not present in data.")
self.target = target
# If it's none, self.target is taken from __init__
elif isinstance(target, type(None)):
if self.target not in data:
raise ValueError("Initialized target column not present in data.")
else:
raise NotImplementedError(
"When data_or_path is a DataFrame, target needs to "
"be a Series, str or None"
)
# 2. if data_or_path = np.ndarray, target = ArrayLike
elif isinstance(data_or_path, np.ndarray):
self.logger.debug("Detected numpy array. Checking target.")
if not isinstance(target, (np.ndarray, pd.Series, list, tuple)):
raise NotImplementedError(
"If data is ndarray, target should be ArrayLike."
)
if len(data_or_path) != len(target):
raise ValueError("Length of target and data don't match.")
if isinstance(target, pd.Series):
data = pd.DataFrame(data_or_path, index=target.index)
if target.name:
self.target = str(target.name)
else:
data = pd.DataFrame(data_or_path)
data[self.target] = target
# 3. if data_or_path = path | str, target = path | str | None
elif isinstance(data_or_path, (str, Path)):
self.logger.debug("Detected path. ")
if isinstance(target, (str, Path)):
self.target = str(target)
elif not isinstance(target, type(None)):
raise ValueError(
"Target must be string | Path | None when `data_or_path` is a "
"path-like object."
)
if metadata:
warn(
"Parameter `metadata` is ignored when `data_or_path` is a "
"path-like object."
)
data, metadata = io.merge_logs(parent=data_or_path, target=self.target)
# 4. Error.
else:
raise NotImplementedError(
"Supported data_or_path types: pd.DataFrame | np.ndarray | Path | str"
)
# Safety check
assert isinstance(data, pd.DataFrame)
# Clean target name
clean_target = clean_feature_name(self.target)
data = data.rename(columns={self.target: clean_target})
self.target = clean_target
# Finish
self.metadata = metadata or {}
self.logger.info(
f"Data contains {len(data)} samples and {len(data.keys())} columns."
)
return data
def has_new_training_data(self):
# TODO: fix a better solution for this
return True
def _mode_detector(self, data: pd.DataFrame):
"""
Detects the mode (Regression / Classification)
        Parameters
----------
data : pd.DataFrame
"""
self.logger.debug("Detecting mode.")
# Only run if mode is not provided
if self.mode in ("classification", "regression"):
return
# Classification if string
labels = data[self.target]
if labels.dtype == str or labels.nunique() < 0.1 * len(data):
self.mode = "classification"
self.objective = self.objective or "neg_log_loss"
# Else regression
else:
self.mode = "regression"
self.objective = self.objective or "neg_mean_absolute_error"
# Logging
self.logger.info(
f"Setting mode to {self.mode} & objective to {self.objective}."
)
def _set_subclasses(self):
"""
        Sets the subclasses. This cannot be done during class initialization
        because certain attributes are data dependent, and the data is only
        known once .fit() is called.
"""
self.logger.debug("Setting subclasses.")
self.data_processor = DataProcessor(
target=self.target,
drop_datetime=True,
include_output=True,
missing_values=self.missing_values,
outlier_removal=self.outlier_removal,
z_score_threshold=self.z_score_threshold,
verbose=self.verbose,
)
self.feature_processor = FeatureProcessor(
target=self.target,
mode=self.mode,
is_temporal=None,
use_wavelets=self.use_wavelets,
extract_features=self.extract_features,
collinear_threshold=self.information_threshold,
verbose=self.verbose,
)
self.standardizer = Standardizer(
target=self.target, mode=self.mode, verbose=self.verbose
)
# Support Functions
def sort_results(self) -> list[Result]:
self.results_.sort(reverse=True)
return self.results_
def _get_main_predictors(self, data: pd.DataFrame) -> dict[str, float]:
"""
        Using Shapley Additive Explanations (SHAP), this function calculates the main
        predictors for a given prediction and stores them on the class instance.
"""
# shap.TreeExplainer is not implemented for all models. So we try and fall back
# to the feature importance given by the feature processor.
# Note that the error would be raised when calling `TreeExplainer(best_model_)`.
try:
# Get shap values
best_model_ = self.best_model_
if best_model_ is not None and hasattr(best_model_, "model"):
best_model_ = best_model_.model
# Note: The error would be raised at this point.
# So we have not much overhead.
shap_values = np.array(TreeExplainer(best_model_).shap_values(data))
# Average over classes if necessary
if shap_values.ndim == 3:
shap_values = np.mean(np.abs(shap_values), axis=0)
# Average over samples
shap_values = np.mean(np.abs(shap_values), axis=0)
shap_values /= shap_values.sum() # normalize to sum up to 1
idx_sort = np.flip(np.argsort(shap_values))
# Set class attribute
main_predictors = {
col: score
for col, score in zip(data.columns[idx_sort], shap_values[idx_sort])
}
except Exception:
            # Fall back to the feature processor's feature importance
fi = self.feature_processor.feature_importance_.get("rf", {})
# Use only those columns that are present in the data
main_predictors = {}
missing_columns = []
for col in data:
if col in fi:
main_predictors[col] = fi[col]
else:
missing_columns.append(col)
if missing_columns:
warn(
f"Some data column names are missing in the shap feature "
f"importance dictionary: {missing_columns}"
)
# Some feature names are obscure since they come from the feature processing
# module. Here, we relate the feature importance back to the original features.
translation = translate_features(list(main_predictors))
self.main_predictors_ = {}
for key, features in translation.items():
for feat in features:
self.main_predictors_[feat] = (
self.main_predictors_.get(feat, 0.0) + main_predictors[key]
)
# Normalize
total_score = np.sum(list(self.main_predictors_.values()))
for key in self.main_predictors_:
self.main_predictors_[key] /= total_score
return self.main_predictors_
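    # Worked illustration of the normalisation above (hypothetical numbers): if
    # translate_features maps {"a__mean": ["a"], "b": ["b"]} and the raw scores are
    # {"a__mean": 0.6, "b": 0.2}, then main_predictors_ becomes
    # {"a": 0.75, "b": 0.25} after dividing by the total of 0.8.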
# Properties
@property
def cv(self):
"""
Gives the Cross Validation scheme
Returns
-------
cv : KFold or StratifiedKFold
The cross validator
"""
# Regression
if self.mode == "regression":
return KFold(
n_splits=self.cv_splits,
shuffle=self.cv_shuffle,
random_state=83847939 if self.cv_shuffle else None,
)
# Classification
if self.mode == "classification":
return StratifiedKFold(
n_splits=self.cv_splits,
shuffle=self.cv_shuffle,
random_state=83847939 if self.cv_shuffle else None,
)
else:
raise NotImplementedError("Unknown Mode.")
@property
def best_feature_set_(self) -> str:
if not self.results_:
raise ValueError("No results available.")
return self.results_[0].feature_set
@property
def best_features_(self) -> list[str]:
feature_sets = self.feature_processor.feature_selector.feature_sets_
return feature_sets[self.best_feature_set_]
@property
def best_model_str_(self) -> str:
if not self.results_:
raise ValueError("No results available.")
return self.results_[0].model
@property
def best_params_(self) -> dict[str, Any]:
if not self.results_:
raise ValueError("No results available.")
return io.parse_json(self.results_[0].params) # type: ignore[return-value]
@property
def best_score_(self) -> float:
if not self.results_:
raise ValueError("No results available.")
return self.results_[0].worst_case | PypiClean |
/BoxKit-2023.6.7.tar.gz/BoxKit-2023.6.7/boxkit/library/_dataset.py |
from boxkit.library import Block # pylint: disable=cyclic-import
from boxkit.library import Data # pylint: disable=cyclic-import
from boxkit.library import Action # pylint: disable=cyclic-import
class Dataset: # pylint: disable=too-many-instance-attributes
"""API class for storing Dataset info"""
type_ = "default"
def __init__(self, blocklist, data):
"""Constructor for Dataset
Parameters
----------
blocklist : list of block objects
data : Data object
"""
super().__init__()
self.blocklist = []
self.xmin, self.ymin, self.zmin = [1e10] * 3
self.xmax, self.ymax, self.zmax = [-1e10] * 3
self._data = None
self._map_blocklist(blocklist)
self._map_data(data)
def __repr__(self):
"""Return a representation of the object."""
return (
"Dataset:\n"
+ f" - type : {type(self)}\n"
+ f" - file : {self._data.inputfile}\n"
+ f" - keys : {self._data.varlist}\n"
+ f" - dtype : {list(self._data.dtype.values())}\n"
+ f" - bound(z-y-x) : [{self.zmin}, {self.zmax}] x "
+ f"[{self.ymin}, {self.ymax}] x "
+ f"[{self.xmin}, {self.xmax}]\n"
+ f" - shape(z-y-x) : {self.nzb} x {self.nyb} x {self.nxb}\n"
+ f" - guard(z-y-x) : {self.zguard} x {self.yguard} x {self.xguard}\n"
+ f" - nblocks : {self.nblocks}\n"
+ f" - dtype : {self._data.dtype}"
)
def __getitem__(self, varkey):
"""
Get variable data
"""
return self._data[varkey]
def __setitem__(self, varkey, value):
"""
Set variable data
"""
self._data[varkey] = value
def _map_blocklist(self, blocklist):
"""
Private method for initialization
"""
if not blocklist:
return
self.blocklist = blocklist
for block in self.blocklist:
self.xmin = min(self.xmin, block.xmin)
self.ymin = min(self.ymin, block.ymin)
self.zmin = min(self.zmin, block.zmin)
self.xmax = max(self.xmax, block.xmax)
self.ymax = max(self.ymax, block.ymax)
self.zmax = max(self.zmax, block.zmax)
def _map_data(self, data):
"""
Private method for initialization
"""
if not data:
return
self._data = data
@property
def nblocks(self):
"""nblocks"""
return self._data.nblocks
@property
def nxb(self):
"""nxb"""
return self._data.nxb
@property
def nyb(self):
"""nyb"""
return self._data.nyb
@property
def nzb(self):
"""nzb"""
return self._data.nzb
@property
def xguard(self):
"""xguard"""
return self._data.xguard
@property
def yguard(self):
"""yguard"""
return self._data.yguard
@property
def zguard(self):
"""zguard"""
return self._data.zguard
@property
def varlist(self):
"""varlist"""
return self._data.varlist
@property
def source(self):
"""source"""
return self._data.source
@property
def dtype(self):
"""dtype"""
return self._data.dtype
def addvar(self, varkey, dtype=float):
"""addvar"""
self._data.addvar(varkey, dtype)
def delvar(self, varkey):
"""delvar"""
self._data.delvar(varkey)
def purge(self, purgeflag="all"):
"""
Clean up the dataset and close it
"""
self._data.purge(purgeflag)
def clone(self, storage="numpy-memmap"):
"""
Clone dataset
"""
# Create data attributes
data_attributes = {
"nblocks": int(self.nblocks),
"nxb": int(self.nxb),
"nyb": int(self.nyb),
"nzb": int(self.nzb),
"storage": storage,
}
data = Data(**data_attributes)
# Create block attributes
block_attributes = [
{
"dx": block.dx,
"dy": block.dy,
"dz": block.dz,
"xmin": block.xmin,
"ymin": block.ymin,
"zmin": block.zmin,
"xmax": block.xmax,
"ymax": block.ymax,
"zmax": block.zmax,
"tag": block.tag,
"leaf": block.leaf,
"level": block.level,
}
for block in self.blocklist
]
blocklist = [Block(data, **attributes) for attributes in block_attributes]
return self.__class__(blocklist, data)
def halo_exchange( # pylint: disable=too-many-arguments
self,
varlist,
nthreads=1,
batch="auto",
backend="serial",
monitor=False,
):
"""
Perform halo exchange
"""
# Convert single string to a list
if isinstance(varlist, str):
varlist = [varlist]
halo_exchange_block.nthreads = nthreads
halo_exchange_block.batch = batch
halo_exchange_block.backend = backend
halo_exchange_block.monitor = monitor
for varkey in varlist:
halo_exchange_block((block for block in self.blocklist), varkey)
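    # Hedged usage sketch: a Dataset built elsewhere (e.g. by a boxkit reader that
    # supplies `blocklist` and `data`) could be cloned and have its guard cells
    # filled roughly as follows; the variable key "temp" is an illustrative assumption.
    #
    #   ds_copy = dataset.clone(storage="numpy-memmap")
    #   ds_copy.halo_exchange("temp", nthreads=2)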
@Action
def halo_exchange_block(block, varkey):
"""
Halo exchange
"""
block.exchange_neighdata(varkey) | PypiClean |
/GDAL-3.7.1.1.tar.gz/GDAL-3.7.1.1/gdal-utils/osgeo_utils/samples/validate_cloud_optimized_geotiff.py |
import os.path
import struct
import sys
from osgeo import gdal
def Usage():
print(
"Usage: validate_cloud_optimized_geotiff.py [-q] [--full-check=yes/no/auto] test.tif"
)
print("")
print("Options:")
print("-q: quiet mode")
print(
"--full-check=yes/no/auto: check tile/strip leader/trailer bytes. auto=yes for local files, and no for remote files"
)
return 2
class ValidateCloudOptimizedGeoTIFFException(Exception):
pass
def full_check_band(
f,
band_name,
band,
errors,
block_order_row_major,
block_leader_size_as_uint4,
block_trailer_last_4_bytes_repeated,
mask_interleaved_with_imagery,
):
block_size = band.GetBlockSize()
mask_band = None
if mask_interleaved_with_imagery:
mask_band = band.GetMaskBand()
mask_block_size = mask_band.GetBlockSize()
if block_size != mask_block_size:
errors += [
band_name + ": mask block size is different from its imagery band"
]
mask_band = None
yblocks = (band.YSize + block_size[1] - 1) // block_size[1]
xblocks = (band.XSize + block_size[0] - 1) // block_size[0]
last_offset = 0
for y in range(yblocks):
for x in range(xblocks):
offset = band.GetMetadataItem("BLOCK_OFFSET_%d_%d" % (x, y), "TIFF")
offset = int(offset) if offset is not None else 0
bytecount = band.GetMetadataItem("BLOCK_SIZE_%d_%d" % (x, y), "TIFF")
bytecount = int(bytecount) if bytecount is not None else 0
if offset > 0:
if block_order_row_major and offset < last_offset:
errors += [
band_name
+ ": offset of block (%d, %d) is smaller than previous block"
% (x, y)
]
if block_leader_size_as_uint4:
gdal.VSIFSeekL(f, offset - 4, 0)
leader_size = struct.unpack("<I", gdal.VSIFReadL(4, 1, f))[0]
if leader_size != bytecount:
errors += [
band_name
+ ": for block (%d, %d), size in leader bytes is %d instead of %d"
% (x, y, leader_size, bytecount)
]
if block_trailer_last_4_bytes_repeated:
if bytecount >= 4:
gdal.VSIFSeekL(f, offset + bytecount - 4, 0)
last_bytes = gdal.VSIFReadL(8, 1, f)
if last_bytes[0:4] != last_bytes[4:8]:
errors += [
band_name
+ ": for block (%d, %d), trailer bytes are invalid"
% (x, y)
]
if mask_band:
offset_mask = mask_band.GetMetadataItem(
"BLOCK_OFFSET_%d_%d" % (x, y), "TIFF"
)
offset_mask = int(offset_mask) if offset_mask is not None else 0
if offset > 0 and offset_mask > 0:
# bytecount_mask = int(mask_band.GetMetadataItem('BLOCK_SIZE_%d_%d' % (x,y), 'TIFF'))
expected_offset_mask = (
offset
+ bytecount
+ (4 if block_leader_size_as_uint4 else 0)
+ (4 if block_trailer_last_4_bytes_repeated else 0)
)
if offset_mask != expected_offset_mask:
errors += [
"Mask of "
+ band_name
+ ": for block (%d, %d), offset is %d, whereas %d was expected"
% (x, y, offset_mask, expected_offset_mask)
]
elif offset == 0 and offset_mask > 0:
if block_order_row_major and offset_mask < last_offset:
errors += [
"Mask of "
+ band_name
+ ": offset of block (%d, %d) is smaller than previous block"
% (x, y)
]
offset = offset_mask
last_offset = offset
def validate(ds, check_tiled=True, full_check=False):
"""Check if a file is a (Geo)TIFF with cloud optimized compatible structure.
Args:
ds: GDAL Dataset for the file to inspect.
check_tiled: Set to False to ignore missing tiling.
      full_check: Set to True to check tile/strip leader/trailer bytes. Might be slow on remote files
Returns:
A tuple, whose first element is an array of error messages
(empty if there is no error), and the second element, a dictionary
with the structure of the GeoTIFF file.
Raises:
ValidateCloudOptimizedGeoTIFFException: Unable to open the file or the
file is not a Tiff.
"""
if int(gdal.VersionInfo("VERSION_NUM")) < 2020000:
raise ValidateCloudOptimizedGeoTIFFException("GDAL 2.2 or above required")
unicode_type = type("".encode("utf-8").decode("utf-8"))
if isinstance(ds, (str, unicode_type)):
gdal.PushErrorHandler()
ds = gdal.Open(ds)
gdal.PopErrorHandler()
if ds is None:
raise ValidateCloudOptimizedGeoTIFFException(
"Invalid file : %s" % gdal.GetLastErrorMsg()
)
if ds.GetDriver().ShortName != "GTiff":
raise ValidateCloudOptimizedGeoTIFFException("The file is not a GeoTIFF")
details = {}
errors = []
warnings = []
filename = ds.GetDescription()
main_band = ds.GetRasterBand(1)
ovr_count = main_band.GetOverviewCount()
filelist = ds.GetFileList()
if filelist is not None and filename + ".ovr" in filelist:
errors += ["Overviews found in external .ovr file. They should be internal"]
if main_band.XSize > 512 or main_band.YSize > 512:
if check_tiled:
block_size = main_band.GetBlockSize()
if block_size[0] == main_band.XSize and block_size[0] > 1024:
errors += ["The file is greater than 512xH or Wx512, but is not tiled"]
if ovr_count == 0:
warnings += [
"The file is greater than 512xH or Wx512, it is recommended "
"to include internal overviews"
]
ifd_offset = int(main_band.GetMetadataItem("IFD_OFFSET", "TIFF"))
ifd_offsets = [ifd_offset]
block_order_row_major = False
block_leader_size_as_uint4 = False
block_trailer_last_4_bytes_repeated = False
mask_interleaved_with_imagery = False
if ifd_offset not in (8, 16):
# Check if there is GDAL hidden structural metadata
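        # Illustrative sketch of such a header (the size value is hypothetical;
        # the flags are the ones parsed below):
        #   GDAL_STRUCTURAL_METADATA_SIZE=000140 bytes
        #   BLOCK_ORDER=ROW_MAJOR
        #   BLOCK_LEADER=SIZE_AS_UINT4
        #   BLOCK_TRAILER=LAST_4_BYTES_REPEATED
        #   MASK_INTERLEAVED_WITH_IMAGERY=YES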
f = gdal.VSIFOpenL(filename, "rb")
if not f:
raise ValidateCloudOptimizedGeoTIFFException("Cannot open file")
signature = struct.unpack("B" * 4, gdal.VSIFReadL(4, 1, f))
bigtiff = signature in ((0x49, 0x49, 0x2B, 0x00), (0x4D, 0x4D, 0x00, 0x2B))
if bigtiff:
expected_ifd_pos = 16
else:
expected_ifd_pos = 8
gdal.VSIFSeekL(f, expected_ifd_pos, 0)
pattern = "GDAL_STRUCTURAL_METADATA_SIZE=%06d bytes\n" % 0
got = gdal.VSIFReadL(len(pattern), 1, f).decode("LATIN1")
if len(got) == len(pattern) and got.startswith(
"GDAL_STRUCTURAL_METADATA_SIZE="
):
size = int(got[len("GDAL_STRUCTURAL_METADATA_SIZE=") :][0:6])
extra_md = gdal.VSIFReadL(size, 1, f).decode("LATIN1")
block_order_row_major = "BLOCK_ORDER=ROW_MAJOR" in extra_md
block_leader_size_as_uint4 = "BLOCK_LEADER=SIZE_AS_UINT4" in extra_md
block_trailer_last_4_bytes_repeated = (
"BLOCK_TRAILER=LAST_4_BYTES_REPEATED" in extra_md
)
mask_interleaved_with_imagery = (
"MASK_INTERLEAVED_WITH_IMAGERY=YES" in extra_md
)
if "KNOWN_INCOMPATIBLE_EDITION=YES" in extra_md:
errors += ["KNOWN_INCOMPATIBLE_EDITION=YES is declared in the file"]
expected_ifd_pos += len(pattern) + size
expected_ifd_pos += (
expected_ifd_pos % 2
) # IFD offset starts on a 2-byte boundary
gdal.VSIFCloseL(f)
if expected_ifd_pos != ifd_offsets[0]:
errors += [
"The offset of the main IFD should be %d. It is %d instead"
% (expected_ifd_pos, ifd_offsets[0])
]
details["ifd_offsets"] = {}
details["ifd_offsets"]["main"] = ifd_offset
for i in range(ovr_count):
# Check that overviews are by descending sizes
ovr_band = ds.GetRasterBand(1).GetOverview(i)
if i == 0:
if ovr_band.XSize > main_band.XSize or ovr_band.YSize > main_band.YSize:
errors += ["First overview has larger dimension than main band"]
else:
prev_ovr_band = ds.GetRasterBand(1).GetOverview(i - 1)
if (
ovr_band.XSize > prev_ovr_band.XSize
or ovr_band.YSize > prev_ovr_band.YSize
):
errors += [
"Overview of index %d has larger dimension than "
"overview of index %d" % (i, i - 1)
]
if check_tiled:
block_size = ovr_band.GetBlockSize()
if block_size[0] == ovr_band.XSize and block_size[0] > 1024:
errors += ["Overview of index %d is not tiled" % i]
# Check that the IFD of descending overviews are sorted by increasing
# offsets
ifd_offset = int(ovr_band.GetMetadataItem("IFD_OFFSET", "TIFF"))
ifd_offsets.append(ifd_offset)
details["ifd_offsets"]["overview_%d" % i] = ifd_offset
if ifd_offsets[-1] < ifd_offsets[-2]:
if i == 0:
errors += [
"The offset of the IFD for overview of index %d is %d, "
"whereas it should be greater than the one of the main "
"image, which is at byte %d" % (i, ifd_offsets[-1], ifd_offsets[-2])
]
else:
errors += [
"The offset of the IFD for overview of index %d is %d, "
"whereas it should be greater than the one of index %d, "
"which is at byte %d" % (i, ifd_offsets[-1], i - 1, ifd_offsets[-2])
]
# Check that the imagery starts by the smallest overview and ends with
# the main resolution dataset
def get_block_offset(band):
blockxsize, blockysize = band.GetBlockSize()
for y in range(int((band.YSize + blockysize - 1) / blockysize)):
for x in range(int((band.XSize + blockxsize - 1) / blockxsize)):
block_offset = band.GetMetadataItem(
"BLOCK_OFFSET_%d_%d" % (x, y), "TIFF"
)
if block_offset:
return int(block_offset)
return 0
block_offset = get_block_offset(main_band)
data_offsets = [block_offset]
details["data_offsets"] = {}
details["data_offsets"]["main"] = block_offset
for i in range(ovr_count):
ovr_band = ds.GetRasterBand(1).GetOverview(i)
block_offset = get_block_offset(ovr_band)
data_offsets.append(block_offset)
details["data_offsets"]["overview_%d" % i] = block_offset
if data_offsets[-1] != 0 and data_offsets[-1] < ifd_offsets[-1]:
if ovr_count > 0:
errors += [
"The offset of the first block of the smallest overview "
"should be after its IFD"
]
else:
errors += [
"The offset of the first block of the image should " "be after its IFD"
]
for i in range(len(data_offsets) - 2, 0, -1):
if data_offsets[i] != 0 and data_offsets[i] < data_offsets[i + 1]:
errors += [
"The offset of the first block of overview of index %d should "
"be after the one of the overview of index %d" % (i - 1, i)
]
if (
len(data_offsets) >= 2
and data_offsets[0] != 0
and data_offsets[0] < data_offsets[1]
):
errors += [
"The offset of the first block of the main resolution image "
"should be after the one of the overview of index %d" % (ovr_count - 1)
]
if full_check and (
block_order_row_major
or block_leader_size_as_uint4
or block_trailer_last_4_bytes_repeated
or mask_interleaved_with_imagery
):
f = gdal.VSIFOpenL(filename, "rb")
if not f:
raise ValidateCloudOptimizedGeoTIFFException("Cannot open file")
full_check_band(
f,
"Main resolution image",
main_band,
errors,
block_order_row_major,
block_leader_size_as_uint4,
block_trailer_last_4_bytes_repeated,
mask_interleaved_with_imagery,
)
if (
main_band.GetMaskFlags() == gdal.GMF_PER_DATASET
and (filename + ".msk") not in ds.GetFileList()
):
full_check_band(
f,
"Mask band of main resolution image",
main_band.GetMaskBand(),
errors,
block_order_row_major,
block_leader_size_as_uint4,
block_trailer_last_4_bytes_repeated,
False,
)
for i in range(ovr_count):
ovr_band = ds.GetRasterBand(1).GetOverview(i)
full_check_band(
f,
"Overview %d" % i,
ovr_band,
errors,
block_order_row_major,
block_leader_size_as_uint4,
block_trailer_last_4_bytes_repeated,
mask_interleaved_with_imagery,
)
if (
ovr_band.GetMaskFlags() == gdal.GMF_PER_DATASET
and (filename + ".msk") not in ds.GetFileList()
):
full_check_band(
f,
"Mask band of overview %d" % i,
ovr_band.GetMaskBand(),
errors,
block_order_row_major,
block_leader_size_as_uint4,
block_trailer_last_4_bytes_repeated,
False,
)
gdal.VSIFCloseL(f)
return warnings, errors, details
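# Hedged usage sketch for programmatic callers (the filename is a placeholder):
#
#   warnings, errors, details = validate("example.tif", full_check=True)
#   if errors:
#       print("Not a cloud optimized GeoTIFF:", errors)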
def main(argv=sys.argv):
"""Return 0 in case of success, 1 for failure."""
i = 1
filename = None
quiet = False
full_check = None
while i < len(argv):
if argv[i] == "-q":
quiet = True
elif argv[i] == "--full-check=yes":
full_check = True
elif argv[i] == "--full-check=no":
full_check = False
elif argv[i] == "--full-check=auto":
full_check = None
elif argv[i][0] == "-":
return Usage()
elif filename is None:
filename = argv[i]
else:
return Usage()
i += 1
if filename is None:
return Usage()
if full_check is None:
full_check = filename.startswith("/vsimem/") or os.path.exists(filename)
try:
ret = 0
warnings, errors, details = validate(filename, full_check=full_check)
if warnings:
if not quiet:
print("The following warnings were found:")
for warning in warnings:
print(" - " + warning)
print("")
if errors:
if not quiet:
print("%s is NOT a valid cloud optimized GeoTIFF." % filename)
print("The following errors were found:")
for error in errors:
print(" - " + error)
print("")
ret = 1
else:
if not quiet:
print("%s is a valid cloud optimized GeoTIFF" % filename)
if not quiet and not warnings and not errors:
headers_size = min(
details["data_offsets"][k] for k in details["data_offsets"]
)
if headers_size == 0:
headers_size = gdal.VSIStatL(filename).size
print("\nThe size of all IFD headers is %d bytes" % headers_size)
except ValidateCloudOptimizedGeoTIFFException as e:
if not quiet:
print("%s is NOT a valid cloud optimized GeoTIFF : %s" % (filename, str(e)))
ret = 1
return ret
if __name__ == "__main__":
sys.exit(main(sys.argv)) | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/path-is-absolute/readme.md | # path-is-absolute [Build Status](https://travis-ci.org/sindresorhus/path-is-absolute)
> Node.js 0.12 [`path.isAbsolute()`](http://nodejs.org/api/path.html#path_path_isabsolute_path) [ponyfill](https://ponyfill.com)
## Install
```
$ npm install --save path-is-absolute
```
## Usage
```js
const pathIsAbsolute = require('path-is-absolute');
// Running on Linux
pathIsAbsolute('/home/foo');
//=> true
pathIsAbsolute('C:/Users/foo');
//=> false
// Running on Windows
pathIsAbsolute('C:/Users/foo');
//=> true
pathIsAbsolute('/home/foo');
//=> false
// Running on any OS
pathIsAbsolute.posix('/home/foo');
//=> true
pathIsAbsolute.posix('C:/Users/foo');
//=> false
pathIsAbsolute.win32('C:/Users/foo');
//=> true
pathIsAbsolute.win32('/home/foo');
//=> false
```
## API
See the [`path.isAbsolute()` docs](http://nodejs.org/api/path.html#path_path_isabsolute_path).
### pathIsAbsolute(path)
### pathIsAbsolute.posix(path)
POSIX specific version.
### pathIsAbsolute.win32(path)
Windows specific version.
## License
MIT © [Sindre Sorhus](https://sindresorhus.com)
| PypiClean |
/Appium-Python-Client-2.11.1.tar.gz/Appium-Python-Client-2.11.1/docs/_build/html/_static/doctools.js | /**
 * select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*
* See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
*/
jQuery.urldecode = function(x) {
if (!x) {
return x
}
return decodeURIComponent(x.replace(/\+/g, ' '));
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s === 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
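/**
 * Illustrative example (hypothetical query string): for a location search of
 * "?q=foo&q=bar&page=2", jQuery.getQueryParameters() returns
 * {q: ['foo', 'bar'], page: ['2']}.
 */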
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node, addItems) {
if (node.nodeType === 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 &&
!jQuery(node.parentNode).hasClass(className) &&
!jQuery(node.parentNode).hasClass("nohighlight")) {
var span;
var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.className = className;
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
var bbox = node.parentElement.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute('class', className);
addItems.push({
"parent": node.parentNode,
"target": rect});
}
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this, addItems);
});
}
}
var addItems = [];
var result = this.each(function() {
highlight(this, addItems);
});
for (var i = 0; i < addItems.length; ++i) {
jQuery(addItems[i].parent).before(addItems[i].target);
}
return result;
};
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
this.initOnKeyListeners();
}
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated === 'undefined')
return string;
return (typeof translated === 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated === 'undefined')
return (n == 1) ? singular : plural;
return translated[Documentation.PLURALEXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) === 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this === '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
},
initOnKeyListeners: function() {
$(document).keydown(function(event) {
var activeElementType = document.activeElement.tagName;
// don't navigate when in search box, textarea, dropdown or button
if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
&& activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey
&& !event.shiftKey) {
switch (event.keyCode) {
case 37: // left
var prevHref = $('link[rel="prev"]').prop('href');
if (prevHref) {
window.location.href = prevHref;
return false;
}
break;
case 39: // right
var nextHref = $('link[rel="next"]').prop('href');
if (nextHref) {
window.location.href = nextHref;
return false;
}
break;
}
}
});
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
}); | PypiClean |
/M5-0.3.2.tar.gz/M5-0.3.2/lib/iscroll-lite.js | (function(){
var m = Math,
vendor = (/webkit/i).test(navigator.appVersion) ? 'webkit' :
(/firefox/i).test(navigator.userAgent) ? 'Moz' :
'opera' in window ? 'O' : '',
// Browser capabilities
has3d = 'WebKitCSSMatrix' in window && 'm11' in new WebKitCSSMatrix(),
hasTouch = 'ontouchstart' in window,
hasTransform = vendor + 'Transform' in document.documentElement.style,
isIDevice = (/iphone|ipad/gi).test(navigator.appVersion),
isPlaybook = (/playbook/gi).test(navigator.appVersion),
hasTransitionEnd = isIDevice || isPlaybook,
nextFrame = (function() {
return window.requestAnimationFrame
|| window.webkitRequestAnimationFrame
|| window.mozRequestAnimationFrame
|| window.oRequestAnimationFrame
|| window.msRequestAnimationFrame
|| function(callback) { return setTimeout(callback, 17); }
})(),
cancelFrame = (function () {
return window.cancelRequestAnimationFrame
|| window.webkitCancelRequestAnimationFrame
|| window.mozCancelRequestAnimationFrame
|| window.oCancelRequestAnimationFrame
|| window.msCancelRequestAnimationFrame
|| clearTimeout
})(),
// Events
RESIZE_EV = 'onorientationchange' in window ? 'orientationchange' : 'resize',
START_EV = hasTouch ? 'touchstart' : 'mousedown',
MOVE_EV = hasTouch ? 'touchmove' : 'mousemove',
END_EV = hasTouch ? 'touchend' : 'mouseup',
CANCEL_EV = hasTouch ? 'touchcancel' : 'mouseup',
// Helpers
trnOpen = 'translate' + (has3d ? '3d(' : '('),
trnClose = has3d ? ',0)' : ')',
// Constructor
iScroll = function (el, options) {
var that = this,
doc = document,
i;
that.wrapper = typeof el == 'object' ? el : doc.getElementById(el);
that.wrapper.style.overflow = 'hidden';
that.scroller = that.wrapper.children[0];
// Default options
that.options = {
hScroll: true,
vScroll: true,
bounce: true,
bounceLock: false,
momentum: true,
lockDirection: true,
useTransform: true,
useTransition: false,
// Events
onRefresh: null,
onBeforeScrollStart: function (e) { e.preventDefault(); },
onScrollStart: null,
onBeforeScrollMove: null,
onScrollMove: null,
onBeforeScrollEnd: null,
onScrollEnd: null,
onTouchEnd: null,
onDestroy: null
};
// User defined options
for (i in options) that.options[i] = options[i];
// Normalize options
that.options.useTransform = hasTransform ? that.options.useTransform : false;
that.options.hScrollbar = that.options.hScroll && that.options.hScrollbar;
that.options.vScrollbar = that.options.vScroll && that.options.vScrollbar;
that.options.useTransition = hasTransitionEnd && that.options.useTransition;
// Set some default styles
that.scroller.style[vendor + 'TransitionProperty'] = that.options.useTransform ? '-' + vendor.toLowerCase() + '-transform' : 'top left';
that.scroller.style[vendor + 'TransitionDuration'] = '0';
that.scroller.style[vendor + 'TransformOrigin'] = '0 0';
if (that.options.useTransition) that.scroller.style[vendor + 'TransitionTimingFunction'] = 'cubic-bezier(0.33,0.66,0.66,1)';
if (that.options.useTransform) that.scroller.style[vendor + 'Transform'] = trnOpen + '0,0' + trnClose;
else that.scroller.style.cssText += ';position:absolute;top:0;left:0';
that.refresh();
that._bind(RESIZE_EV, window);
that._bind(START_EV);
if (!hasTouch) that._bind('mouseout', that.wrapper);
};
// Prototype
iScroll.prototype = {
enabled: true,
x: 0,
y: 0,
steps: [],
scale: 1,
handleEvent: function (e) {
var that = this;
switch(e.type) {
case START_EV:
if (!hasTouch && e.button !== 0) return;
that._start(e);
break;
case MOVE_EV: that._move(e); break;
case END_EV:
case CANCEL_EV: that._end(e); break;
case RESIZE_EV: that._resize(); break;
case 'mouseout': that._mouseout(e); break;
case 'webkitTransitionEnd': that._transitionEnd(e); break;
}
},
_resize: function () {
this.refresh();
},
_pos: function (x, y) {
x = this.hScroll ? x : 0;
y = this.vScroll ? y : 0;
if (this.options.useTransform) {
this.scroller.style[vendor + 'Transform'] = trnOpen + x + 'px,' + y + 'px' + trnClose + ' scale(' + this.scale + ')';
} else {
x = m.round(x);
y = m.round(y);
this.scroller.style.left = x + 'px';
this.scroller.style.top = y + 'px';
}
this.x = x;
this.y = y;
},
_start: function (e) {
var that = this,
point = hasTouch ? e.touches[0] : e,
matrix, x, y;
if (!that.enabled) return;
if (that.options.onBeforeScrollStart) that.options.onBeforeScrollStart.call(that, e);
if (that.options.useTransition) that._transitionTime(0);
that.moved = false;
that.animating = false;
that.zoomed = false;
that.distX = 0;
that.distY = 0;
that.absDistX = 0;
that.absDistY = 0;
that.dirX = 0;
that.dirY = 0;
if (that.options.momentum) {
if (that.options.useTransform) {
// Very lame general purpose alternative to CSSMatrix
matrix = getComputedStyle(that.scroller, null)[vendor + 'Transform'].replace(/[^0-9-.,]/g, '').split(',');
x = matrix[4] * 1;
y = matrix[5] * 1;
} else {
x = getComputedStyle(that.scroller, null).left.replace(/[^0-9-]/g, '') * 1;
y = getComputedStyle(that.scroller, null).top.replace(/[^0-9-]/g, '') * 1;
}
if (x != that.x || y != that.y) {
if (that.options.useTransition) that._unbind('webkitTransitionEnd');
else cancelFrame(that.aniTime);
that.steps = [];
that._pos(x, y);
}
}
that.absStartX = that.x; // Needed by snap threshold
that.absStartY = that.y;
that.startX = that.x;
that.startY = that.y;
that.pointX = point.pageX;
that.pointY = point.pageY;
that.startTime = e.timeStamp || (new Date()).getTime();
if (that.options.onScrollStart) that.options.onScrollStart.call(that, e);
that._bind(MOVE_EV);
that._bind(END_EV);
that._bind(CANCEL_EV);
},
_move: function (e) {
var that = this,
point = hasTouch ? e.touches[0] : e,
deltaX = point.pageX - that.pointX,
deltaY = point.pageY - that.pointY,
newX = that.x + deltaX,
newY = that.y + deltaY,
timestamp = e.timeStamp || (new Date()).getTime();
if (that.options.onBeforeScrollMove) that.options.onBeforeScrollMove.call(that, e);
that.pointX = point.pageX;
that.pointY = point.pageY;
// Slow down if outside of the boundaries
if (newX > 0 || newX < that.maxScrollX) {
newX = that.options.bounce ? that.x + (deltaX / 2) : newX >= 0 || that.maxScrollX >= 0 ? 0 : that.maxScrollX;
}
if (newY > 0 || newY < that.maxScrollY) {
newY = that.options.bounce ? that.y + (deltaY / 2) : newY >= 0 || that.maxScrollY >= 0 ? 0 : that.maxScrollY;
}
if (that.absDistX < 6 && that.absDistY < 6) {
that.distX += deltaX;
that.distY += deltaY;
that.absDistX = m.abs(that.distX);
that.absDistY = m.abs(that.distY);
return;
}
// Lock direction
if (that.options.lockDirection) {
if (that.absDistX > that.absDistY + 5) {
newY = that.y;
deltaY = 0;
} else if (that.absDistY > that.absDistX + 5) {
newX = that.x;
deltaX = 0;
}
}
that.moved = true;
that._pos(newX, newY);
that.dirX = deltaX > 0 ? -1 : deltaX < 0 ? 1 : 0;
that.dirY = deltaY > 0 ? -1 : deltaY < 0 ? 1 : 0;
if (timestamp - that.startTime > 300) {
that.startTime = timestamp;
that.startX = that.x;
that.startY = that.y;
}
if (that.options.onScrollMove) that.options.onScrollMove.call(that, e);
},
_end: function (e) {
if (hasTouch && e.touches.length != 0) return;
var that = this,
point = hasTouch ? e.changedTouches[0] : e,
target, ev,
momentumX = { dist:0, time:0 },
momentumY = { dist:0, time:0 },
duration = (e.timeStamp || (new Date()).getTime()) - that.startTime,
newPosX = that.x,
newPosY = that.y,
newDuration;
that._unbind(MOVE_EV);
that._unbind(END_EV);
that._unbind(CANCEL_EV);
if (that.options.onBeforeScrollEnd) that.options.onBeforeScrollEnd.call(that, e);
if (!that.moved) {
if (hasTouch) {
// Find the last touched element
target = point.target;
while (target.nodeType != 1) target = target.parentNode;
if (target.tagName != 'SELECT' && target.tagName != 'INPUT' && target.tagName != 'TEXTAREA') {
ev = document.createEvent('MouseEvents');
ev.initMouseEvent('click', true, true, e.view, 1,
point.screenX, point.screenY, point.clientX, point.clientY,
e.ctrlKey, e.altKey, e.shiftKey, e.metaKey,
0, null);
ev._fake = true;
target.dispatchEvent(ev);
}
}
that._resetPos(200);
if (that.options.onTouchEnd) that.options.onTouchEnd.call(that, e);
return;
}
if (duration < 300 && that.options.momentum) {
momentumX = newPosX ? that._momentum(newPosX - that.startX, duration, -that.x, that.scrollerW - that.wrapperW + that.x, that.options.bounce ? that.wrapperW : 0) : momentumX;
momentumY = newPosY ? that._momentum(newPosY - that.startY, duration, -that.y, (that.maxScrollY < 0 ? that.scrollerH - that.wrapperH + that.y : 0), that.options.bounce ? that.wrapperH : 0) : momentumY;
newPosX = that.x + momentumX.dist;
newPosY = that.y + momentumY.dist;
if ((that.x > 0 && newPosX > 0) || (that.x < that.maxScrollX && newPosX < that.maxScrollX)) momentumX = { dist:0, time:0 };
if ((that.y > 0 && newPosY > 0) || (that.y < that.maxScrollY && newPosY < that.maxScrollY)) momentumY = { dist:0, time:0 };
}
if (momentumX.dist || momentumY.dist) {
newDuration = m.max(m.max(momentumX.time, momentumY.time), 10);
that.scrollTo(newPosX, newPosY, newDuration);
if (that.options.onTouchEnd) that.options.onTouchEnd.call(that, e);
return;
}
that._resetPos(200);
if (that.options.onTouchEnd) that.options.onTouchEnd.call(that, e);
},
_resetPos: function (time) {
var that = this,
resetX = that.x >= 0 ? 0 : that.x < that.maxScrollX ? that.maxScrollX : that.x,
resetY = that.y >= 0 || that.maxScrollY > 0 ? 0 : that.y < that.maxScrollY ? that.maxScrollY : that.y;
if (resetX == that.x && resetY == that.y) {
if (that.moved) {
if (that.options.onScrollEnd) that.options.onScrollEnd.call(that); // Execute custom code on scroll end
that.moved = false;
}
return;
}
that.scrollTo(resetX, resetY, time || 0);
},
_mouseout: function (e) {
var t = e.relatedTarget;
if (!t) {
this._end(e);
return;
}
while (t = t.parentNode) if (t == this.wrapper) return;
this._end(e);
},
_transitionEnd: function (e) {
var that = this;
if (e.target != that.scroller) return;
that._unbind('webkitTransitionEnd');
that._startAni();
},
/**
*
* Utilities
*
*/
_startAni: function () {
var that = this,
startX = that.x, startY = that.y,
startTime = (new Date).getTime(),
step, easeOut;
if (that.animating) return;
if (!that.steps.length) {
that._resetPos(400);
return;
}
step = that.steps.shift();
if (step.x == startX && step.y == startY) step.time = 0;
that.animating = true;
that.moved = true;
if (that.options.useTransition) {
that._transitionTime(step.time);
that._pos(step.x, step.y);
that.animating = false;
if (step.time) that._bind('webkitTransitionEnd');
else that._resetPos(0);
return;
}
(function animate () {
var now = (new Date).getTime(),
newX, newY;
if (now >= startTime + step.time) {
that._pos(step.x, step.y);
that.animating = false;
if (that.options.onAnimationEnd) that.options.onAnimationEnd.call(that); // Execute custom code on animation end
that._startAni();
return;
}
now = (now - startTime) / step.time - 1;
easeOut = m.sqrt(1 - now * now);
newX = (step.x - startX) * easeOut + startX;
newY = (step.y - startY) * easeOut + startY;
that._pos(newX, newY);
if (that.animating) that.aniTime = nextFrame(animate);
})();
},
_transitionTime: function (time) {
this.scroller.style[vendor + 'TransitionDuration'] = time + 'ms';
},
_momentum: function (dist, time, maxDistUpper, maxDistLower, size) {
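    // Hedged note on the maths below: with constant deceleration a = 0.0006 px/ms^2
    // and launch speed v = |dist| / time, the stopping distance follows the
    // kinematics identity d = v^2 / (2 * a) and the stopping time is t = v / a.
    // When d would cross a boundary, the overshoot is capped to a speed-dependent
    // fraction of the wrapper `size` and the speed is rescaled accordingly.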
var deceleration = 0.0006,
speed = m.abs(dist) / time,
newDist = (speed * speed) / (2 * deceleration),
newTime = 0, outsideDist = 0;
// Proportinally reduce speed if we are outside of the boundaries
if (dist > 0 && newDist > maxDistUpper) {
outsideDist = size / (6 / (newDist / speed * deceleration));
maxDistUpper = maxDistUpper + outsideDist;
speed = speed * maxDistUpper / newDist;
newDist = maxDistUpper;
} else if (dist < 0 && newDist > maxDistLower) {
outsideDist = size / (6 / (newDist / speed * deceleration));
maxDistLower = maxDistLower + outsideDist;
speed = speed * maxDistLower / newDist;
newDist = maxDistLower;
}
newDist = newDist * (dist < 0 ? -1 : 1);
newTime = speed / deceleration;
return { dist: newDist, time: m.round(newTime) };
},
_offset: function (el) {
var left = -el.offsetLeft,
top = -el.offsetTop;
while (el = el.offsetParent) {
left -= el.offsetLeft;
top -= el.offsetTop;
}
return { left: left, top: top };
},
_bind: function (type, el, bubble) {
(el || this.scroller).addEventListener(type, this, !!bubble);
},
_unbind: function (type, el, bubble) {
(el || this.scroller).removeEventListener(type, this, !!bubble);
},
/**
*
* Public methods
*
*/
destroy: function () {
var that = this;
that.scroller.style[vendor + 'Transform'] = '';
// Remove the event listeners
that._unbind(RESIZE_EV, window);
that._unbind(START_EV);
that._unbind(MOVE_EV);
that._unbind(END_EV);
that._unbind(CANCEL_EV);
that._unbind('mouseout', that.wrapper);
if (that.options.useTransition) that._unbind('webkitTransitionEnd');
if (that.options.onDestroy) that.options.onDestroy.call(that);
},
refresh: function () {
var that = this,
offset;
that.wrapperW = that.wrapper.clientWidth;
that.wrapperH = that.wrapper.clientHeight;
that.scrollerW = that.scroller.offsetWidth;
that.scrollerH = that.scroller.offsetHeight;
that.maxScrollX = that.wrapperW - that.scrollerW;
that.maxScrollY = that.wrapperH - that.scrollerH;
that.dirX = 0;
that.dirY = 0;
that.hScroll = that.options.hScroll && that.maxScrollX < 0;
that.vScroll = that.options.vScroll && (!that.options.bounceLock && !that.hScroll || that.scrollerH > that.wrapperH);
offset = that._offset(that.wrapper);
that.wrapperOffsetLeft = -offset.left;
that.wrapperOffsetTop = -offset.top;
that.scroller.style[vendor + 'TransitionDuration'] = '0';
that._resetPos(200);
},
scrollTo: function (x, y, time, relative) {
var that = this,
step = x,
i, l;
that.stop();
if (!step.length) step = [{ x: x, y: y, time: time, relative: relative }];
for (i=0, l=step.length; i<l; i++) {
if (step[i].relative) { step[i].x = that.x - step[i].x; step[i].y = that.y - step[i].y; }
that.steps.push({ x: step[i].x, y: step[i].y, time: step[i].time || 0 });
}
that._startAni();
},
scrollToElement: function (el, time) {
var that = this, pos;
el = el.nodeType ? el : that.scroller.querySelector(el);
if (!el) return;
pos = that._offset(el);
pos.left += that.wrapperOffsetLeft;
pos.top += that.wrapperOffsetTop;
pos.left = pos.left > 0 ? 0 : pos.left < that.maxScrollX ? that.maxScrollX : pos.left;
pos.top = pos.top > 0 ? 0 : pos.top < that.maxScrollY ? that.maxScrollY : pos.top;
time = time === undefined ? m.max(m.abs(pos.left)*2, m.abs(pos.top)*2) : time;
that.scrollTo(pos.left, pos.top, time);
},
disable: function () {
this.stop();
this._resetPos(0);
this.enabled = false;
// If disabled after touchstart we make sure that there are no left over events
this._unbind(MOVE_EV);
this._unbind(END_EV);
this._unbind(CANCEL_EV);
},
enable: function () {
this.enabled = true;
},
stop: function () {
cancelFrame(this.aniTime);
this.steps = [];
this.moved = false;
this.animating = false;
}
};
if (typeof exports !== 'undefined') exports.iScroll = iScroll;
else window.iScroll = iScroll;
})(); | PypiClean |
/ACCC-0.0.3.tar.gz/ACCC-0.0.3/accc/compiler/compiler.py | from math import log, ceil
from itertools import zip_longest
from functools import partial, lru_cache
import itertools
import accc.langspec as langspec
#########################
# PRE-DECLARATIONS #
#########################
# lexems seens in structure
from accc.lexems import LEXEM_TYPE_CONDITION, LEXEM_TYPE_ACTION
from accc.lexems import LEXEM_TYPE_PREDICAT, LEXEM_TYPE_DOWNLEVEL
# lexems only seen in values
from accc.lexems import LEXEM_TYPE_COMPARISON, LEXEM_TYPE_OPERATOR
from accc.lexems import LEXEM_TYPE_UINTEGER
# all lexems
from accc.lexems import ALL as ALL_LEXEMS
#########################
# COMPILER CLASS #
#########################
class Compiler():
"""
Compiler of code writed with any vocabulary. ('01', 'ATGC', 'whatevr',…)
A source code is an ordered list of vocabulary elements
('10011010000101', 'AGGATGATCAGATA', 'wtrvwhttera'…).
Whatever the given source_code, it's always compilable. (but can return empty object code)
Also, it can be totally illogic (do many times the same test, do nothing,…)
The source code is readed entirely for determine STRUCTURE,
and then re-readed for determines effectives VALUES.
The STRUCTURE defines:
- logic of the code
- lexems type that will be used
The VALUES defines:
- what are the exact value of each lexem
- values of integers used as function parameters
Example of prettified STRUCTURE:
if C:
A
if C:
A
A
if P and P:
A
A
A
if P:
A
    VALUES describes which lexem is effectively used for each
    word, C, A or P (condition, action, predicat).
    NB: D is the char that indicates an indent level decrease.
The dictionnary values vocabulary, given at compiler creation, define lexems :
vocabulary_values = {
LEXEM_TYPE_COMPARISON: ('parameter1', 'parameter2', 'parameter3', 'parameter4'),
LEXEM_TYPE_PREDICAT : ('have_that', 'is_this', 'have_many_things', 'know_that'),
LEXEM_TYPE_ACTION : ('do_that', 'say_this'),
LEXEM_TYPE_OPERATOR : ('>', '==', '<', 'is', '!='),
}
Then, compiled code can be something like:
if parameter1 == parameter2 and have_that:
do_that
if have_that:
say_this
do_that
if know_that and have_many_things:
do_that
say_this
do_that
if have_many_things:
say_this
Modification of provided lexems types is not supported at this time.
"""
# CONSTRUCTOR #################################################################
def __init__(self, alphabet, target_language_spec, comparables, predicats, actions, operators,
neutral_value_condition='True', neutral_value_action='pass'):
"""
        Expects an alphabet ('01', 'ATGC', …), a language specification and
        vocabularies for the structure and values parts.
        The neutral value is used when no value is found.
        Set it to something that passes in all cases.
        NB: a small source code leads to lots of neutral values.
"""
self.alphabet = alphabet
self.voc_structure = ALL_LEXEMS
self.target_lang_spec = target_language_spec()
self.voc_values = {
LEXEM_TYPE_COMPARISON: comparables,
LEXEM_TYPE_PREDICAT : predicats,
LEXEM_TYPE_ACTION : actions,
LEXEM_TYPE_OPERATOR : operators,
}
self.neutral_value_action = neutral_value_action
self.neutral_value_condition = neutral_value_condition
# verifications
assert(issubclass(neutral_value_action.__class__, str)
and issubclass(neutral_value_condition.__class__, str)
)
# prepare tables of words->lexems
self._initialize_tables()
# PUBLIC METHODS ###############################################################
def compile(self, source_code, post_treatment=''.join):
"""Compile given source code.
Return object code, modified by given post treatment.
"""
# read structure
structure = self._structure(source_code)
values = self._struct_to_values(structure, source_code)
# create object code, translated in targeted language
obj_code = langspec.translated(
structure, values,
self.target_lang_spec
)
# apply post treatment and return
return obj_code if post_treatment is None else post_treatment(obj_code)
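    # Hedged usage sketch (the vocabulary values are illustrative and the language
    # spec is whatever callable your accc.langspec module provides):
    #
    #   c = Compiler('01', my_target_language_spec,
    #                comparables=('p1', 'p2'),
    #                predicats=('is_ready',),
    #                actions=('do_that', 'say_this'),
    #                operators=('==', '<'))
    #   print(c.compile('011010010111010001'))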
# PRIVATE METHODS ##############################################################
def _initialize_tables(self):
"""Create tables for structure and values, word->vocabulary"""
# structure table
self.table_struct, self.idnt_struct_size = self._create_struct_table()
# values table
self.table_values, self.idnt_values_size = self._create_values_table()
# debug print
#print(self.table_struct)
#print(self.idnt_struct_size)
#print(self.table_values)
#print(self.idnt_values_size)
def _structure(self, source_code):
"""return structure in ACDP format."""
# define cutter as a per block reader
def cutter(seq, block_size):
for index in range(0, len(seq), block_size):
lexem = seq[index:index+block_size]
if len(lexem) == block_size:
yield self.table_struct[seq[index:index+block_size]]
return tuple(cutter(source_code, self.idnt_struct_size))
def _next_lexem(self, lexem_type, source_code, source_code_size):
"""Return next readable lexem of given type in source_code.
If no value can be found, the neutral_value will be used"""
# define reader as a lexem extractor
def reader(seq, block_size):
identificator = ''
            for char in seq:
                if len(identificator) == block_size:
yield self.table_values[lexem_type][identificator]
identificator = ''
identificator += char
        lexem_reader = reader(source_code, self.idnt_values_size[lexem_type])
lexem = None
time_out = 0
        while lexem is None and time_out < 2*source_code_size:
lexem = next(lexem_reader)
time_out += 1
# here we have found a lexem
return lexem
def _next_condition_lexems(self, source_code, source_code_size):
"""Return condition lexem readed in source_code"""
# find three lexems
lexems = tuple((
self._next_lexem(LEXEM_TYPE_COMPARISON, source_code, source_code_size),
self._next_lexem(LEXEM_TYPE_OPERATOR , source_code, source_code_size),
self._next_lexem(LEXEM_TYPE_COMPARISON, source_code, source_code_size)
))
# verify integrity
if None in lexems: # one of the condition lexem was not found in source code
return None
else: # all lexems are valid
return ' '.join(lexems)
@lru_cache(maxsize = 100)
def _string_to_int(self, s):
"""Read an integer in s, in Little Indian. """
base = len(self.alphabet)
return sum((self._letter_to_int(l) * base**lsb
for lsb, l in enumerate(s)
))
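    # Worked example (added comment, not in the original): with alphabet '01' the
    # base is 2 and the string is read least-significant character first, so
    # '011' maps to 0*2**0 + 1*2**1 + 1*2**2 = 6.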
@lru_cache(maxsize = None)
def _letter_to_int(self, l):
return self.alphabet.index(l)
@lru_cache(maxsize = 127) # source code is potentially largely variable on length
def _integer_size_for(self, source_code_size):
"""Find and return the optimal integer size.
A perfect integer can address all indexes of
a string of size source_code_size.
"""
return ceil(log(source_code_size, len(self.alphabet)))
def _struct_to_values(self, structure, source_code):
"""Return list of values readed in source_code,
according to given structure.
"""
        # iterate on source code until all values are found
        # if a value is not findable,
        # (ie its identificator is not in source code)
        # it will be replaced by the associated neutral value
iter_source_code = itertools.cycle(source_code)
values = []
        for lexem_type in (l for l in structure if l != 'D'):
            if lexem_type == LEXEM_TYPE_CONDITION:
new_value = self._next_condition_lexems(
iter_source_code, len(source_code)
)
else:
new_value = self._next_lexem(
lexem_type, iter_source_code, len(source_code)
)
            # if the value is invalid:
            # association with the right neutral value
if new_value is None:
if lexem_type in (LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION):
new_value = self.neutral_value_condition
else:
new_value = self.neutral_value_action
values.append(new_value)
return values
# TABLE METHODS ################################################################
def _create_struct_table(self):
"""Create table identificator->vocabulary,
and return it with size of an identificator"""
len_alph = len(self.alphabet)
len_vocb = len(self.voc_structure)
identificator_size = ceil(log(len_vocb, len_alph))
# create list of lexems
num2alph = lambda x, n: self.alphabet[(x // len_alph**n) % len_alph]
identificators = [[str(num2alph(x, n))
for n in range(identificator_size)
]
for x in range(len_vocb)
]
# initialize table and iterable
identificators_table = {}
zip_id_voc = zip_longest(
identificators, self.voc_structure,
fillvalue=None
)
# create dict identificator:word
for idt, word in zip_id_voc:
identificators_table[''.join(idt)] = word
return identificators_table, identificator_size
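    # Sizing example (added comment, not in the original): with alphabet '01'
    # (len_alph == 2) and 6 structure lexems (len_vocb == 6), identificator_size
    # is ceil(log(6, 2)) == 3, so each structure word is encoded on 3 characters,
    # read least-significant character first ('000', '100', '010', ...).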
def _create_values_table(self):
"""Create table lexem_type->{identificator->vocabulary},
and return it with sizes of an identificator as lexem_type->identificator_size"""
# number of existing character, and returned dicts
len_alph = len(self.alphabet)
identificators_table = {k:{} for k in self.voc_values.keys()}
identificators_sizes = {k:-1 for k in self.voc_values.keys()}
for lexem_type, vocabulary in self.voc_values.items():
# find number of different values that can be found,
# and size of an identificator.
len_vocb = len(vocabulary)
identificators_sizes[lexem_type] = ceil(log(len_vocb, len_alph))
# create list of possible identificators
num2alph = lambda x, n: self.alphabet[(x // len_alph**n) % len_alph]
identificators = [[str(num2alph(x, n))
for n in range(identificators_sizes[lexem_type])
] # this list is an identificator
for x in range(len_alph**identificators_sizes[lexem_type])
] # this one is a list of identificator
# initialize iterable
zip_id_voc = zip_longest(
identificators, vocabulary,
fillvalue=None
)
# create dict {identificator:word}
for idt, voc in zip_id_voc:
identificators_table[lexem_type][''.join(idt)] = voc
# return all
return identificators_table, identificators_sizes
# PREDICATS ###################################################################
# ACCESSORS ###################################################################
# CONVERSION ##################################################################
# OPERATORS ################################################################### | PypiClean |
/HybridUI-0.0.1.tar.gz/HybridUI-0.0.1/hybrid/elements/tabe.py | from typing import List
from ..element import Element
class Tabe(Element):
def __init__(
self,
content: List[Element] = None,
activeKey= None,
addIcon = None,
animated = None,
centered = None,
defaultActiveKey = None,
hideAdd = None,
items = None,
moreIcon= None,
popupClassName = None,
renderTabBar = None,
size = None,
tabBarExtraContent= None,
tabBarGutter = None,
tabBarStyle = None,
tabPosition= None,
destroyInactiveTabPane= None,
type = None,
onChange = None,
onEdit = None,
onTabClick= None,
onTabScroll= None,
):
super().__init__(component='Tabs')
self.children = content
if activeKey is not None:
self._props["activeKey"] = activeKey
if addIcon is not None:
self._props["addIcon"] = addIcon
if animated is not None:
self._props["animated"] = animated
if centered is not None:
self._props["centered"] = centered
if defaultActiveKey is not None:
self._props["defaultActiveKey"] = defaultActiveKey
if hideAdd is not None:
self._props["hideAdd"] = hideAdd
if items is not None:
self._props["items"] = items
if moreIcon is not None:
self._props["moreIcon"] = moreIcon
if popupClassName is not None:
self._props["popupClassName"] = popupClassName
if renderTabBar is not None:
self._props["renderTabBar"] = renderTabBar
if size is not None:
self._props["size"] = size
if tabBarExtraContent is not None:
self._props["tabBarExtraContent"] = tabBarExtraContent
if tabBarGutter is not None:
self._props["tabBarGutter"] = tabBarGutter
if tabBarStyle is not None:
self._props["tabBarStyle"] = tabBarStyle
if tabPosition is not None:
self._props["tabPosition"] = tabPosition
if destroyInactiveTabPane is not None:
self._props["destroyInactiveTabPane"] = destroyInactiveTabPane
if type is not None:
self._props["type"] = type
if onChange is not None:
self._props["onChange"] = onChange
if onEdit is not None:
self._props["onEdit"] = onEdit
if onTabClick is not None:
self._props["onTabClick"] = onTabClick
if onTabScroll is not None:
self._props["onTabScroll"] = onTabScroll | PypiClean |
/AADeepLearning-1.0.8.tar.gz/AAdeepLearning-1.0.8/example/aa_save.py | from AADeepLearning import AADeepLearning
from AADeepLearning.datasets import mnist
from AADeepLearning.datasets import np_utils
# The MNIST dataset is already split into 60,000 training samples and 10,000 test samples; it is downloaded automatically if not present
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# The first dimension is the number of samples, the second is the number of color channels, the third is the height and the fourth is the width
x_train = x_train.reshape(x_train.shape[0], 1, 28, 28)
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28)
# Convert x_train and x_test to float32
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalize: map the values into the range 0 to 1
x_train /= 255
x_test /= 255
# Since this is a 10-class problem, map the class vector (integers from 0 to 9) to a binary class matrix, i.e. one-hot encode it
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
# Network configuration
config = {
    # Initial learning rate
    "learning_rate": 0.001,
    # Optimization strategy: sgd/momentum/rmsprop/adam
    "optimizer": "adam",
    # Momentum coefficient for momentum-based gradient descent; the default of 0.9 usually needs no tuning
    "momentum_coefficient": 0.9,
    # Number of training iterations
    "number_iteration": 1000,
    # Number of samples used per training step
    "batch_size": 64,
    # Print progress every this many iterations
    "display": 100,
    # Name used when saving model snapshots
    "save_model": "AA",
    # Save a snapshot every this many iterations; files are saved as name + iteration count + ".model" (e.g. "AA1000.model")
    "save_iteration": 1000,
}
# Network structure; data flows from top to bottom
net = [
    {
        # Layer name, unrestricted
        "name": "flatten_1",
        # Layer type: flattens the data into a shape suitable for the network, used at the input or between convolutional and fully connected layers. (60000, 1, 28, 28) -> (784, 60000)
        "type": "flatten"
    },
    {
        # Layer name
        "name": "fully_connected_1",
        # Layer type: fully connected
        "type": "fully_connected",
        # Number of neurons
        "neurons_number": 256,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "relu_1",
        # Layer type (activation): relu, sigmoid or tanh
        "type": "relu"
    },
    {
        # Layer name
        "name": "fully_connected_2",
        # Layer type: fully connected
        "type": "fully_connected",
        # Number of neurons; 10 because this is a 10-class problem
        "neurons_number": 10,
        # Weight initialization: msra/xavier/gaussian
        "weight_init": "msra"
    },
    {
        # Layer name
        "name": "softmax_1",
        # Layer type: classification layer, outputs the probability distribution over the 10 classes
        "type": "softmax"
    }
]
# Define the model, passing in the network structure and configuration
AA = AADeepLearning(net=net, config=config)
# Train the model
AA.train(x_train=x_train, y_train=y_train)
# Predict on the test set; returns the probability distribution and accuracy. score: per-class probabilities for each sample, accuracy: accuracy
score, accuracy = AA.predict(x_test=x_test, y_test=y_test)
print("test set accuracy:", accuracy) | PypiClean |
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/eval/nevaluator.py | from typing import Optional
import sympy
from mathics.core.atoms import Number
from mathics.core.attributes import A_N_HOLD_ALL, A_N_HOLD_FIRST, A_N_HOLD_REST
from mathics.core.convert.sympy import from_sympy
from mathics.core.element import BaseElement
from mathics.core.evaluation import Evaluation
from mathics.core.expression import Expression
from mathics.core.number import PrecisionValueError, get_precision
from mathics.core.symbols import Atom
from mathics.core.systemsymbols import SymbolMachinePrecision, SymbolN
# FIXME: Add the two-argument form N[expr, n]
def eval_N(
expression: BaseElement,
evaluation: Evaluation,
prec: BaseElement = SymbolMachinePrecision,
) -> BaseElement:
"""
Equivalent to Expression(SymbolN, expression).evaluate(evaluation)
"""
evaluated_expression = expression.evaluate(evaluation)
result = eval_NValues(evaluated_expression, prec, evaluation)
if result is None:
return expression
if isinstance(result, Number):
return result
return result.evaluate(evaluation)
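# Illustrative call (added comment, not from the original module): from inside a
# builtin's evaluation code one would typically write something like
#   value = eval_N(expression, evaluation)
# to numerify an expression at machine precision; a different precision can be
# requested through the optional ``prec`` argument, which must be an element
# that ``get_precision`` accepts (an assumption based on the code below).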
def eval_NValues(
expr: BaseElement, prec: BaseElement, evaluation: Evaluation
) -> Optional[BaseElement]:
"""
    Looks for the numeric value of ``expr`` with precision ``prec`` by applying NValues rules
    stored in ``evaluation.definitions``.
    If ``prec`` cannot be evaluated as a number, returns None; otherwise, returns an expression.
"""
# The first step is to determine the precision goal
try:
# Here ``get_precision`` is called with ``show_messages``
# set to ``False`` to avoid show the same warnings repeatedly.
d = get_precision(prec, evaluation, show_messages=False)
except PrecisionValueError:
# We can ensure that the function always return an expression if
# the exception was captured by the caller.
return
# If the expression is a number, just round it to the required
# precision
if isinstance(expr, Number):
return expr.round(d)
# If expr is a List, or a Rule (or maybe expressions with heads for
# which we are sure do not have NValues or special attributes)
# just apply `eval_NValues` to each element and return the new list.
if expr.get_head_name() in ("System`List", "System`Rule"):
elements = expr.elements
# FIXME: incorporate these lines into Expression call
result = Expression(expr.head)
new_elements = [
eval_NValues(element, prec, evaluation) for element in expr.elements
]
result.elements = tuple(
new_element if new_element else element
for element, new_element in zip(elements, new_elements)
)
result._build_elements_properties()
return result
# Special case for the Root builtin
# This should be implemented as an NValue
if expr.has_form("Root", 2):
return from_sympy(sympy.N(expr.to_sympy(), d))
# Here we look for the NValues associated to the
# lookup_name of the expression.
    # If a rule is found and successfully applied,
# reevaluate the result and apply `eval_NValues` again.
# This should be implemented as a loop instead of
# recursively.
name = expr.get_lookup_name()
if name != "":
nexpr = Expression(SymbolN, expr, prec)
result = evaluation.definitions.get_value(
name, "System`NValues", nexpr, evaluation
)
if result is not None:
if not result.sameQ(nexpr):
result = result.evaluate(evaluation)
result = eval_NValues(result, prec, evaluation)
return result
    # If we are here, it is because there are no NValues that match
    # the expression. In such a case, if we arrive at an atomic expression,
    # just return it.
if isinstance(expr, Atom):
return expr
else:
# Otherwise, look at the attributes, determine over which elements
# we need to apply `eval_NValues`, and rebuild the expression with
# the results.
attributes = expr.head.get_attributes(evaluation.definitions)
head = expr.head
elements = expr.get_mutable_elements()
if A_N_HOLD_ALL & attributes:
eval_range = ()
elif A_N_HOLD_FIRST & attributes:
eval_range = range(1, len(elements))
elif A_N_HOLD_REST & attributes:
if len(expr.elements) > 0:
eval_range = (0,)
else:
eval_range = ()
else:
eval_range = range(len(elements))
newhead = eval_NValues(head, prec, evaluation)
head = head if newhead is None else newhead
for index in eval_range:
new_element = eval_NValues(elements[index], prec, evaluation)
if new_element:
elements[index] = new_element
# FIXME: incorporate these 3 lines into Expression call
result = Expression(head)
result.elements = elements
result._build_elements_properties()
return result
# TODO: Revisit - can this be simplified? Is some broader framework this fits into?
# comment mmatera: Other methods that I would like to have here, as non-member methods are | PypiClean |
/JqPyCharts-4.0.0.tar.bz2/JqPyCharts-4.0.0/README.rst | ==========
JqPyCharts
==========
About
=====
`JqPyCharts` is a selection of JavaScript and CSS resources for simple charts in Python projects.
**Credit goes to all the projects which `JqPyCharts` makes use of:**
- `jquery <https://jquery.org/>`_
- `jqPlot <https://bitbucket.org/cleonello/jqplot>`_
The latest documentation can be found online at `<http://packages.python.org/JqPyCharts>`_.
Requirements
============
See: RequiredSoftware in documentation or::
{SOURCE}/docs/RequiredSoftware.rst
Installation
============
#. To install from pypi using ``pip/pip3``::
$ pip3 install JqPyCharts
#. To install from the source::
$ python3 setup.py install
Building the Documentation
--------------------------
If you wish to generate your own copy of the documentation, you will need to:
#. Get the `JqPyCharts` source.
#. If not already installed - install `PSphinxTheme <https://github.com/peter1000/PSphinxTheme>`_ ::
$ pip3 install PSphinxTheme
#. From the `JqPyCharts` source directory, run ``python3 setup.py build_sphinx -E``.
#. Once Sphinx is finished, point a web browser to the file::
{SOURCE}/build/sphinx/html/index.html
Online Resources
================
- Docs: http://packages.python.org/JqPyCharts
- PyPI: http://pypi.python.org/pypi/JqPyCharts
- Source: https://github.com/peter1000/JqPyCharts
Projects using JqPyCharts
=========================
`projects` which make use of: **JqPyCharts**
- `PyNHEP <https://github.com/peter1000/PyNHEP>`_
PyNHEP: The N(utrition) and H(ealthy) E(ating) P(lanner).
|
|
`JqPyCharts` is distributed under the terms of the BSD 3-clause license.
Consult `LICENSE` or `BSD-3-Clause <http://opensource.org/licenses/BSD-3-Clause>`_.
(c) 2014, `peter1000` https://github.com/peter1000
All rights reserved.
|
|
| PypiClean |
/Montreal-Forced-Aligner-3.0.0a3.tar.gz/Montreal-Forced-Aligner-3.0.0a3/docs/source/first_steps/tutorials.rst |
.. _`filing an issue`: https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/issues
.. _`Montreal Forced Aligner v2 Corpus Phonetics Tutorial`: https://www.eleanorchodroff.com/tutorial/montreal-forced-aligner-v2.html
.. _`Phonetic forced alignment with the Montreal Forced Aligner`: https://www.youtube.com/watch?v=Zhj-ccMDj_w
.. _`How I used Montreal Forced Aligner for a New Language (Sinhalese)`: https://medium.com/breaktheloop/how-i-used-montreal-forced-aligner-for-a-new-language-sinhalese-8f2c22a65a22
.. _`Bootstrapping an IPA dictionary for English using Montreal Forced Aligner 2.0`: https://mmcauliffe.medium.com/creating-english-ipa-dictionary-using-montreal-forced-aligner-2-0-242415dfee32
.. _`Update on Montreal Forced Aligner performance`: https://memcauliffe.com/update-on-montreal-forced-aligner-performance.html
.. _`Speaker dictionaries and multilingual IPA`: https://memcauliffe.com/speaker-dictionaries-and-multilingual-ipa.html
.. _tutorials:
External tutorials
==================
I will try to keep this updated with a list of in-depth tutorials for using MFA. If you write up anything that could be included here, please let me know by `filing an issue`_ and I will add it.
* `Montreal Forced Aligner v2 Corpus Phonetics Tutorial`_ (Now updated for 2.0!)
* Courtesy of :xref:`chodroff`
* `Phonetic forced alignment with the Montreal Forced Aligner`_ (YouTube recording)
* Courtesy of :xref:`chodroff` and :xref:`rutgers_spanish_portuguese`
* `How I used Montreal Forced Aligner for a New Language (Sinhalese)`_
* Courtesy of :xref:`dias`
* `Bootstrapping an IPA dictionary for English using Montreal Forced Aligner 2.0`_
* `Update on Montreal Forced Aligner performance`_
* `Speaker dictionaries and multilingual IPA`_
| PypiClean |
/MITunaX-0.1.tar.gz/MITunaX-0.1/tuna/worker_interface.py | """Module that represents the WorkerInterface class interface"""
from multiprocessing import Process, Lock
try:
import queue
except ImportError:
import Queue as queue
import logging
import os
import json
from datetime import datetime
import socket
import random
import string
from time import sleep
from sqlalchemy import func as sqlalchemy_func
from sqlalchemy.exc import IntegrityError, OperationalError #pylint: disable=wrong-import-order
from tuna.dbBase.sql_alchemy import DbSession
from tuna.utils.db_utility import get_id_solvers
from tuna.abort import chk_abort_file
from tuna.fin_utils import compose_config_obj
from tuna.metadata import TUNA_LOG_DIR, TUNA_DOCKER_NAME, PREC_TO_CMD
from tuna.metadata import TABLE_COLS_FUSION_MAP, TABLE_COLS_CONV_MAP, INVERS_DIR_MAP
from tuna.metadata import ENV_SLVGRP_MAP, SLV_ENV_MAP
from tuna.metadata import FIND_ONLY_EXCEPTION
from tuna.metadata import get_solver_ids, TENSOR_PRECISION
from tuna.tables import DBTables
from tuna.db_tables import connect_db
from tuna.config_type import ConfigType
MAX_JOB_RETRIES = 10
NUM_SQL_RETRIES = 10
TABLE_COLS_CONV_INVMAP = {}
for clarg, cnvparam in TABLE_COLS_CONV_MAP.items():
if not cnvparam[0] in TABLE_COLS_CONV_INVMAP:
TABLE_COLS_CONV_INVMAP[cnvparam[0]] = clarg
elif len(clarg) > len(TABLE_COLS_CONV_INVMAP[cnvparam[0]]):
TABLE_COLS_CONV_INVMAP[cnvparam[0]] = clarg
TABLE_COLS_FUSION_INVMAP = {}
for clarg, cnvparam in TABLE_COLS_FUSION_MAP.items():
if not cnvparam[0] in TABLE_COLS_FUSION_INVMAP:
TABLE_COLS_FUSION_INVMAP[cnvparam[0]] = clarg
elif len(clarg) > len(TABLE_COLS_FUSION_INVMAP[cnvparam[0]]):
TABLE_COLS_FUSION_INVMAP[cnvparam[0]] = clarg
LOG_TIMEOUT = 10 * 60.0 # in seconds
class WorkerInterface(Process):
""" Interface class extended by Builder and Evaluator. The purpose of this class is to define
common functionalities. """
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-statements
def __init__(self, **kwargs):
"""Constructor"""
super().__init__()
allowed_keys = set([
'machine', 'gpu_id', 'num_procs', 'barred', 'bar_lock', 'envmt',
'reset_interval', 'fin_steps', 'fin_infile', 'fin_outfile', 'job_queue',
'queue_lock', 'label', 'fetch_state', 'docker_name', 'bin_cache',
'end_jobs', 'config_type', 'dynamic_solvers_only', 'session_id'
])
self.__dict__.update((key, None) for key in allowed_keys)
#for pylint
self.machine = None
self.gpu_id = None
self.num_procs = self.barred = None
self.bar_lock = Lock()
self.envmt = []
self.fin_steps = []
self.fin_infile = None
self.fin_outfile = None
self.job_queue = None
self.queue_lock = Lock()
self.is_fdb = False
self.find_db = None
self.fetch_state = ['new']
self.compile_only = False
self.docker_name = TUNA_DOCKER_NAME
self.gpu = None
self.bin_cache = False
self.label = None
self.end_jobs = None
self.solver_id_map, _ = get_solver_ids()
self.id_solver_map, _ = get_id_solvers()
self.dynamic_solvers_only = False
self.config_type = ConfigType.convolution if self.config_type is None else self.config_type
self.config_dict = None
self.session_id = None
self.__dict__.update(
(key, value) for key, value in kwargs.items() if key in allowed_keys)
self.dbt = DBTables(session_id=self.session_id,
config_type=self.config_type)
self.miopen_user_db_path = "/tmp/miopenpdb/thread-{}/config/miopen".format(
self.gpu_id)
self.envmt.append(
"MIOPEN_CUSTOM_CACHE_DIR=/tmp/miopenpdb/thread-{}/cache".format(
self.gpu_id))
self.envmt.append("MIOPEN_USER_DB_PATH={}".format(self.miopen_user_db_path))
self.hostname = self.machine.hostname
self.poll_retries = 0
self.job = None
self.config = None
self.solver = None
self.cmd_iter = 1
self.claim_num = self.num_procs.value
self.last_reset = datetime.now()
dir_name = os.path.join(TUNA_LOG_DIR,
type(self).__name__,
"{}_{}p".format(self.hostname, self.machine.port))
if not os.path.exists(dir_name):
os.makedirs(dir_name)
logger_name = os.path.join(dir_name, str(self.gpu_id))
self.set_logger(logger_name)
connect_db()
#call machine.connect and machine.set_logger in run (inside the subprocess)
#also set cnx here in case WorkerInterface exec_command etc called directly
self.cnx = self.machine.connect(chk_abort_file)
def set_logger(self, logger_name):
"""Build logger with given name"""
# JD: This needs to be moved to logger.py
log_level = os.environ.get('TUNA_LOGLEVEL', None)
lgr = logging.getLogger(logger_name)
log_file = os.path.join(TUNA_LOG_DIR, logger_name + ".log")
fmt = logging.Formatter(
'%(lineno)d - %(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(log_file, mode='a')
file_handler.setFormatter(fmt)
file_handler.setLevel(log_level.upper() if log_level else logging.INFO)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(fmt)
stream_handler.setLevel(logging.INFO)
lgr.addHandler(file_handler)
lgr.addHandler(stream_handler)
lgr.setLevel(log_level.upper() if log_level else logging.DEBUG)
self.logger = lgr
#@staticmethod
def check_status(self, err):
"""Function to check err status"""
if err is not None and hasattr(err,
'channel') and err.channel.exit_status > 0:
self.logger.warning(err)
status = False
else: # -1 means the command is still running
status = True
return status
def reset_machine(self):
"""Function to reset machhine"""
self.machine.restart_server()
self.last_reset = datetime.now()
def process_log_line(self, stdout):
"""Parse log from run command"""
timeout = False
error_cfg = False
abort_cfg = False
error_bad_param = False
self.compile_only = False
for line in stdout:
try:
if chk_abort_file(self.machine.id, self.logger, self.machine.arch):
with self.bar_lock:
self.num_procs.value -= 1
break
decoded_line = line.strip() # lines.strip().decode()
self.logger.info(decoded_line)
low_line = decoded_line.lower()
if low_line.find('error') != -1 and not self.compile_only:
          # ignore search failed error from miopen
if 'search failed' in low_line:
continue
# miopen throws an error to escape after compiling when using MIOPEN_COMPILE_AND_RUN=0
err1 = 'search skipped' in low_line
# miopen also throws an error when MIOPEN_DEVICE_ARCH is used
err2 = 'escaping launching kernel' in low_line
# error thrown as a result of MIOPEN_DEBUG_COMPILE_ONLY
err3 = 'miopen_debug_compile_only is enabled, escaping' in low_line
if err1 or err2 or err3:
self.compile_only = True
error_cfg = False
else:
self.logger.error('Parser found error: %s', low_line)
error_cfg = True
# when miopen doesn't search for the specified solver, it will throw bad param
if 'incorrect param' in decoded_line:
error_bad_param = True
if low_line.find('aborted') != -1:
abort_cfg = True
except (socket.timeout, socket.error):
timeout = True
self.logger.warning('Socket error, aborted')
break
if stdout is None:
abort_cfg = True
return timeout, error_cfg, abort_cfg, error_bad_param
def handle_errors_or_complete(self, error_bad_param, status, timeout,
error_cfg, abort_cfg, job_solver):
"""Function to update job state"""
success = False
if error_bad_param:
self.logger.warning('job id %s: solver %s had incorrect parameters',
self.job.id, job_solver)
self.set_job_state('bad_param')
elif not status:
# the command failed to run properly
self.logger.warning('job id %s: MIOpen Driver failed to run properly',
self.job.id)
self.set_job_state('error_status')
elif timeout:
      # write a value indicating this process has hung and wait for it to reach num_procs - 1,
      # then restart,
      # then reopen the ssh connection, then continue
# update the job_id status in the database to new since the job failed
self.logger.error(
'job id %s: Timeout while waiting for command to finish', self.job.id)
self.set_job_state('timeout')
self.set_barrier(self.reset_machine, True)
elif error_cfg:
self.logger.warning('job id %s errored', self.job.id)
self.set_job_state('errored')
elif abort_cfg:
self.logger.warning('job id %s aborted', self.job.id)
self.set_job_state('aborted')
else:
success = True
return success
def compose_query(self, find_state, session):
"""Helper function to compose query"""
query = session.query(self.dbt.job_table, self.dbt.config_table)\
.filter(self.dbt.job_table.session == self.dbt.session.id)\
.filter(self.dbt.job_table.valid == 1)\
.filter(self.dbt.config_table.valid == 1)
if self.label:
query = query.filter(self.dbt.job_table.reason == self.label)
if self.fin_steps:
query = query.filter(
self.dbt.job_table.fin_step.like('%' + self.fin_steps[0] + '%'))
else:
query = query.filter(self.dbt.job_table.fin_step == 'not_fin')
query = query.filter(self.dbt.job_table.retries < MAX_JOB_RETRIES)\
.filter(self.dbt.job_table.state == find_state)\
.filter(self.dbt.config_table.id == self.dbt.job_table.config)
query = query.order_by(self.dbt.job_table.retries.asc()).limit(
self.claim_num).with_for_update()
return query
def get_fdb_entry(self, session, solver):
""" Get FindDb entry from db """
fdb_entry = self.dbt.find_db_table()
fdb_entry.config = self.config.id
fdb_entry.solver = solver
fdb_entry.session = self.dbt.session.id
fdb_entry.opencl = False
fdb_entry.logger = self.logger
fdb_entry.session = self.dbt.session.id
fdb_query = fdb_entry.get_query(session, self.dbt.find_db_table,
self.dbt.solver_app, self.dbt.session.id)
obj = fdb_query.first()
return obj, fdb_entry
def update_fdb_entry(self, session, solver):
""" Add a new entry to fdb if there isnt one already """
obj, fdb_entry = self.get_fdb_entry(session, solver)
if obj: # existing entry in db
# This can be removed if we implement the delete orphan cascade
for blob in obj.blobs:
session.delete(blob)
fdb_entry = obj
else:
# Insert the above entry
session.add(fdb_entry)
return fdb_entry
def compose_fdb_entry(self, session, fin_json, fdb_obj):
"""Compose a FindDB table entry from fin_output"""
fdb_entry = self.update_fdb_entry(
session, self.solver_id_map[fdb_obj['solver_name']])
fdb_entry.fdb_key = fin_json['db_key']
fdb_entry.kernel_time = -1
fdb_entry.alg_lib = fdb_obj['algorithm']
fdb_entry.workspace_sz = -1
fdb_entry.session = self.dbt.session.id
return fdb_entry
def compose_kernel_entry(self, fdb_obj, fdb_entry):
"""Compose a new Kernel Cache entry from fin input"""
fdb_entry.valid = True
fdb_entry.workspace_sz = fdb_obj['workspace']
# Now we have the ID, lets add the binary cache objects
fdb_entry.blobs = []
for kern_obj in fdb_obj['kernel_objects']:
kernel_obj = self.dbt.kernel_cache()
kernel_obj.conv_find_db_key = fdb_entry.id
kernel_obj.kernel_name = kern_obj['kernel_file']
kernel_obj.kernel_args = kern_obj['comp_options']
kernel_obj.kernel_blob = bytes(kern_obj['blob'], 'utf-8')
kernel_obj.kernel_hash = kern_obj['md5_sum']
kernel_obj.uncompressed_size = kern_obj['uncompressed_size']
fdb_entry.blobs.append(kernel_obj)
return True
def process_fdb_compile(self,
session,
fin_json,
result_str='miopen_find_compile_result',
check_str='find_compiled'):
"""retrieve find db compile json results"""
success = False
for fdb_obj in fin_json[result_str]:
if fdb_obj[check_str]:
fdb_entry = self.compose_fdb_entry(session, fin_json, fdb_obj)
if fdb_obj['reason'] == 'Success':
success = self.compose_kernel_entry(fdb_obj, fdb_entry)
session.add(fdb_entry)
self.logger.info('Updating find Db(Build) for job_id=%s', self.job.id)
else:
# JD: add info about reason to the logs table
fdb_entry.valid = False
else:
self.logger.warning("Failed find_db compile, cfg_id: %s, obj: %s",
fin_json['config_tuna_id'], fdb_obj)
try:
session.commit()
except OperationalError as err:
self.logger.warning('FinEval: Unable to update Database: %s', err)
success = False
return success
def update_pdb_config(self, session, layout, data_type, bias):
""" update and retrieve perf_config entry from mysql """
perf_config_table = self.dbt.perf_config_table
perf_config_dict = {
'layout': layout,
'data_type': data_type,
'bias': bias,
'config': self.config.id,
'session': self.dbt.session.id
}
self.logger.info('Updating %s for job_id=%s',
perf_config_table.__tablename__, self.job.id)
res = session.query(perf_config_table).filter_by(**perf_config_dict).all()
if not res:
session.add(perf_config_table(**perf_config_dict))
session.commit()
perf_config_entry = session.query(perf_config_table).filter_by(
**perf_config_dict).one()
return perf_config_entry
def update_pdb_entry(self, session, solver, layout, data_type, bias, params):
""" update and retrieve perf_db entry from mysql """
perf_table = self.dbt.perf_db_table
perf_config_entry = self.update_pdb_config(session, layout, data_type, bias)
perf_db_dict = {
'solver': solver,
'miopen_config': perf_config_entry.id,
'session': self.dbt.session.id
}
update_dict = {'params': params, 'session': self.dbt.session.id}
self.logger.info('Updating %s for job_id=%s', perf_table.__tablename__,
self.job.id)
num_rows = session.query(perf_table).filter_by(
**perf_db_dict).update(update_dict)
perf_db_dict.update(update_dict)
if num_rows == 0:
self.logger.info('insert %s for job_id=%s', perf_db_dict, self.job.id)
session.add(perf_table(**perf_db_dict))
else:
self.logger.info('%u update %s for job_id=%s', num_rows, perf_db_dict,
self.job.id)
session.commit()
query = session.query(perf_table).filter_by(**perf_db_dict)
perf_entry = query.one()
return perf_config_entry, perf_entry
def queue_end_reset(self):
"""resets end queue flag"""
with self.bar_lock:
self.end_jobs.value = 0
def load_job_queue(self, session, ids):
"""load job_queue with info for job ids"""
job_cfgs = session.query(self.dbt.job_table, self.dbt.config_table)\
.filter(self.dbt.job_table.valid == 1)\
.filter(self.dbt.job_table.session == self.dbt.session.id)\
.filter(self.dbt.config_table.id == self.dbt.job_table.config)\
.filter(self.dbt.job_table.id.in_(ids)).all()
if len(ids) != len(job_cfgs):
raise Exception(
'Failed to load job queue. #ids: {} - #job_cgfs: {}'.format(
len(ids), len(job_cfgs)))
for job, config in job_cfgs:
if job.solver:
query = session.query(self.dbt.solver_table)\
.filter(self.dbt.solver_table.session == self.dbt.session.id)\
.filter(self.dbt.solver_table.solver == job.solver)
solver = query.one()
else:
query = session.query(self.dbt.solver_app, self.dbt.solver_table)\
.filter(self.dbt.solver_app.session == self.dbt.session.id)\
.filter(self.dbt.solver_app.applicable == 1)\
.filter(self.dbt.solver_table.tunable == 1)\
.filter(self.dbt.solver_app.config == job.config)\
.filter(self.dbt.solver_app.solver == self.dbt.solver_table.id)\
.filter(self.dbt.solver_table.tunable == 1)
app_solver_desc = query.all()
ids = [solver.id for _, solver in app_solver_desc]
solver = self.dbt.solver_table()
if ids:
solver.tunable = 1
else:
self.logger.warning(
"No applicable & tunable solvers found: id %s, solver %s, config %s",
job.id, job.solver, job.config)
solver.tunable = 0
self.job_queue.put((job, config, solver))
self.logger.info("Put job %s %s %s", job.id, job.state, job.reason)
#pylint: disable=too-many-branches
def get_job(self, find_state, set_state, imply_end):
"""Interface function to get new job for builder/evaluator"""
for idx in range(NUM_SQL_RETRIES):
try:
with self.queue_lock:
if imply_end and self.end_jobs.value > 0:
self.logger.warning('No %s jobs found, skip query', find_state)
return False
if self.job_queue.empty():
ids = ()
with DbSession() as session:
query = self.compose_query(find_state, session)
job_cfgs = query.all()
if not job_cfgs:
# we are done
self.logger.warning('No %s jobs found, fin_step: %s',
find_state, self.fin_steps)
if imply_end:
self.logger.warning("set end")
self.end_jobs.value = 1
return False
ids = tuple([str(job_row.id) for job_row, _ in job_cfgs])
self.logger.info("%s jobs %s", find_state, ids)
if set_state == "eval_start":
session.query(self.dbt.job_table).filter(
self.dbt.job_table.id.in_(ids)).update(
{
self.dbt.job_table.state: set_state,
self.dbt.job_table.eval_mid: self.machine.id,
self.dbt.job_table.gpu_id: self.gpu_id
},
synchronize_session='fetch')
else:
                # note: for a compile job, gpu_id is a zero-based tuna process index, not a GPU
session.query(self.dbt.job_table).filter(
self.dbt.job_table.id.in_(ids)).update(
{
self.dbt.job_table.state: set_state,
self.dbt.job_table.machine_id: self.machine.id,
self.dbt.job_table.gpu_id: self.gpu_id
},
synchronize_session='fetch')
session.commit()
self.load_job_queue(session, ids)
#also in queue_lock
self.job, self.config, self.solver = self.job_queue.get(True, 1)
self.config_dict = compose_config_obj(self.config)
self.logger.info("Got job %s %s %s", self.job.id, self.job.state,
self.job.reason)
return True
except OperationalError as error:
self.logger.warning('%s, Db contention, sleeping ...', error)
sleep(random.randint(1, 30))
except IntegrityError as error:
session.rollback()
self.logger.warning(
'Attempt %s to update job (host = %s, worker = %s) failed (%s), retrying ... ',
idx, self.hostname, self.gpu_id, error)
sleep(5)
except queue.Empty:
self.logger.warning('Shared job queue empty, retrying ... ')
self.logger.error(
'%s retries exhausted to update job status (host = %s, worker = %s), exiting ... ',
NUM_SQL_RETRIES, self.hostname, self.gpu_id)
return False
# JD: This should take a session obj as an input to remove the creation of an extraneous session
def set_job_state(self, state, increment_retries=False):
"""Interface function to update job state for builder/evaluator"""
self.logger.info('Setting job id %s state to %s', self.job.id, state)
for idx in range(NUM_SQL_RETRIES):
with DbSession() as session:
try:
if state in ["running", "compiling", "evaluating"]:
session.query(self.dbt.job_table).filter(
self.dbt.job_table.id == self.job.id).update({
self.dbt.job_table.state: state,
})
else:
if increment_retries:
session.query(self.dbt.job_table).filter(
self.dbt.job_table.id == self.job.id).update({
self.dbt.job_table.state: state,
self.dbt.job_table.retries: self.dbt.job_table.retries + 1
})
else:
# JD: When would this happen ?
# also this is a side-effect, not cool
cache = '~/.cache/miopen_'
blurr = ''.join(
random.choice(string.ascii_lowercase) for i in range(10))
cache_loc = cache + blurr
session.query(self.dbt.job_table).filter(
self.dbt.job_table.id == self.job.id).update({
self.dbt.job_table.state: state,
self.dbt.job_table.cache_loc: cache_loc
})
session.commit()
return True
except OperationalError as error:
self.logger.warning('%s, Db contention, attempt %s, sleeping ...',
error, idx)
sleep(random.randint(1, 30))
except IntegrityError as error:
session.rollback()
self.logger.warning(
'Attempt to update job state (job_id = %s) failed', self.job.id)
self.logger.warning(error)
return False
self.logger.error(
'%s retries exhausted to update job status (host = %s, worker = %s), exiting ... ',
NUM_SQL_RETRIES, self.hostname, self.gpu_id)
return False
def exec_command(self, cmd):
"""execute on native machine"""
ret_code, out, err = self.cnx.exec_command(cmd, timeout=LOG_TIMEOUT)
if err is not None and hasattr(err, 'channel'):
self.logger.info(err)
err.channel.settimeout(LOG_TIMEOUT)
return ret_code, out, err
def exec_docker_cmd(self, cmd):
"""forward command execution to machine method"""
ret_code, out, err = self.machine.exec_command(cmd,
docker_name=self.docker_name,
timeout=LOG_TIMEOUT)
if out:
out = out.read().strip()
if not out and err:
self.logger.info('Error executing docker cmd: %s \n err: %s', cmd,
err.read())
if err is not None and hasattr(err, 'channel'):
err.channel.settimeout(LOG_TIMEOUT)
self.logger.info(err)
return ret_code, out, err
def get_branch_hash(self):
"""Interface function to get new branch hash"""
_, out, _ = self.exec_docker_cmd(
"cat /opt/rocm/miopen/include/miopen/version.h "
"| grep MIOPEN_VERSION_TWEAK | cut -d ' ' -f 3")
self.logger.info('Got branch commit hash: %s', out)
return out
def get_rocm_v(self):
"""Interface function to get rocm version info"""
_, out, _ = self.exec_docker_cmd("cat /opt/rocm/.info/version")
self.logger.info('Got rocm version: %s', out)
return out
@staticmethod
def compose_lcl_envmt(solver):
"""Setting up local_envmt var"""
# JD: Move HIP_VISIBLE_DEVICES here
# pylint: disable=too-many-nested-blocks
lcl_envmt = []
solver_id_map, _ = get_solver_ids()
if solver not in FIND_ONLY_EXCEPTION:
lcl_envmt.append("MIOPEN_DEBUG_FIND_ONLY_SOLVER={}".format(
solver_id_map[solver]))
for key, env_var in FIND_ONLY_EXCEPTION.items():
lcl_envmt.append("{}=0".format(env_var))
else:
for key, sol_group in ENV_SLVGRP_MAP.items():
if solver not in sol_group:
lcl_envmt.append("{}=0".format(key))
else:
for item in sol_group:
if item != solver and item in SLV_ENV_MAP:
if solver not in SLV_ENV_MAP or SLV_ENV_MAP[item] != SLV_ENV_MAP[
solver]:
cnstr = "{}=0".format(SLV_ENV_MAP[item])
if cnstr not in lcl_envmt:
lcl_envmt.append(cnstr)
return lcl_envmt
def run_fin_cmd(self):
"""Run a fin command after generating the JSON"""
fin_output = self.machine.make_temp_file()
cmd = []
env_str = " ".join(self.envmt)
cmd.append(env_str)
cmd.extend(
['/opt/rocm/bin/fin', '-i',
self.get_fin_input(), '-o', fin_output]) # pylint: disable=no-member
for i in range(MAX_JOB_RETRIES):
ret_code, _, err = self.exec_docker_cmd(cmd)
if ret_code != 0:
self.logger.error('Error executing command: %s', ' '.join(cmd))
if err:
err_str = err.read()
self.logger.error('%s : %s', ret_code, err_str)
if "disk I/O error" in err_str:
self.logger.error('fin retry : %u', i)
sleep(random.randint(1, 10))
else:
break
else:
self.logger.error('err code : %s', ret_code)
break
else:
break
if ret_code != 0:
return None
# load the output json file and strip the env
fin_json = json.loads(self.machine.read_file(fin_output))[1:]
assert len(fin_json) == 1
# JD: if we implement multiple jobs per fin launch, this would be a loop
fin_json = fin_json[0]
return fin_json
def run_driver_cmd(self):
"""Definition of running the MIOpen driver cmd"""
sub_cmd = PREC_TO_CMD[self.config_type][self.config.input_t.data_type]
bash_cmd = 'MIOpenDriver {} -V 0 -i 1'.format(sub_cmd)
driver_args = self.config_dict
if "direction" in driver_args:
driver_args['direction'] = INVERS_DIR_MAP[driver_args['direction']]
for field, val in driver_args.items():
if val is None:
continue
if sub_cmd in TENSOR_PRECISION.keys():
if field in TABLE_COLS_CONV_INVMAP.keys():
arg_name = TABLE_COLS_CONV_INVMAP[field]
bash_cmd += " -{} {}".format(arg_name, val)
elif sub_cmd in ['CBAInfer', 'CBAInferfp16']:
if field in TABLE_COLS_FUSION_INVMAP.keys():
arg_name = TABLE_COLS_FUSION_INVMAP[field]
bash_cmd += " -{} {}".format(arg_name, val)
lcl_envmt = self.envmt[:]
# solver = self.job.solver if self.job.solver and not self.job.solver == '' else None
if self.job.solver: # None and empty string are both false
self.logger.info('Solver specified, filter using MIOpen env vars: %s',
self.job.solver)
lcl_envmt.extend(self.compose_lcl_envmt(self.job.solver))
# create environment string for the command to execute,
# remote ssh is rejecting the env setting using dicts
export_all = ["{}".format(x) for x in lcl_envmt]
env_str = " ".join(export_all)
bash_cmd = bash_cmd + ' 2>&1 '
# p = os.path.join('/home',self.user, 'MLOpen')
cmd = "{env} {wrk}".format(env=env_str, wrk=bash_cmd)
self.logger.warning("Machine: %s, GPU ID: %s, Executing: %s", self.hostname,
self.gpu_id, cmd)
return self.exec_docker_cmd(cmd)
def set_barrier(self, funct, with_timeout):
"""Setting time barrier for Process to define execution timeout"""
if self.barred.value == 0:
# this is the first proc to reach the barrier
with self.bar_lock:
self.barred.value += 1
self.logger.info('Waiting for other instances to pause')
wait_cnt = 0
timeout = False
while self.barred.value < self.num_procs.value:
sleep(10)
if with_timeout and self.barred.value == 1:
wait_cnt += 1
timeout = True
if wait_cnt > 180:
break
if timeout:
self.logger.warning(
'Timed out waiting for hung process, proceeding ... ')
else:
self.logger.info('Finished waiting for instances to pause')
funct()
with self.bar_lock:
self.barred.value = 0
return True
return False
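  # Note on intended use (added comment): set_barrier() is paired with
  # check_wait_barrier() below. The first worker to reach the barrier increments
  # `barred`, waits until every other worker has also paused (each of them blocks
  # inside check_wait_barrier), runs `funct` exactly once, and then resets the
  # counter so that all workers resume.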
def check_wait_barrier(self):
"""Checking time barrier"""
self.logger.info('Checking barrier')
if self.barred.value != 0:
self.logger.info('Blocked procs found')
self.logger.info('Current barrier count: %s', self.barred.value)
with self.bar_lock:
self.barred.value += 1
self.logger.warning('Waiting for processes to finish')
while self.barred.value != 0:
sleep(60)
self.logger.warning('Finished waiting for processes')
return True
return False
def get_compile_jobs(self):
"""Checking num compile jobs left to determine
when the evaluator should stop waiting for jobs to compile"""
with DbSession() as session:
try:
query = session.query(sqlalchemy_func.count(self.dbt.job_table.id))\
.filter(self.dbt.job_table.valid == 1)\
.filter(self.dbt.job_table.session == self.dbt.session.id)\
.filter(self.dbt.job_table.state.in_(('new', 'started', 'compile_start', 'compiling',
'compiled')))
if self.label:
query = query.filter(self.dbt.job_table.reason == self.label)
if self.machine.arch:
query = query.filter(self.dbt.job_table.arch == self.machine.arch)
if self.machine.num_cu:
query = query.filter(self.dbt.job_table.num_cu == self.machine.num_cu)
if self.fin_steps:
query = query.filter(
self.dbt.job_table.fin_step.like('%' + self.fin_steps[0] + '%'))
else:
query = query.filter(self.dbt.job_table.fin_step == 'not_fin')
compile_jobs = query.one()[0]
except IntegrityError as error:
session.rollback()
self.logger.warning('Attempt to get #compile jobs failed')
self.logger.warning(error)
return compile_jobs
def reset_job_state(self):
"""Helper function to reset job state during signal interrupt"""
if self.job and self.job.state != 'compiled' and self.job.state != 'evaluated':
self.logger.warning('resetting job state to %s', self.fetch_state[0])
if "new" in self.fetch_state:
self.set_job_state("new")
if "compiled" in self.fetch_state:
self.set_job_state("compiled")
while not self.job_queue.empty():
try:
self.job, self.config, self.solver = self.job_queue.get(True, 1)
if self.job.state == "compile_start":
self.set_job_state("new")
if self.job.state == "eval_start":
if self.is_fdb:
self.set_job_state("new")
else:
self.set_job_state("compiled")
except queue.Empty:
break
def run(self):
"""Main run function of WorkerInterface Process"""
self.machine.set_logger(self.logger)
try:
self.cnx = self.machine.connect(chk_abort_file)
while True:
self.check_wait_barrier()
if chk_abort_file(self.machine.id, self.logger, self.machine.arch):
with self.bar_lock:
self.num_procs.value -= 1
return False
# re-establish node connection
usage = None
try:
usage = self.machine.getusedspace()
except (socket.timeout, socket.error):
usage = None
if not usage:
self.set_barrier(self.reset_machine, True)
continue
if usage > 90:
# JD: Tell prometheus I am out of disk space
self.logger.warning('Used space overflow detected')
self.set_barrier(lambda: (), True)
continue
# the step member is defined in the derived class
ret = self.step() # pylint: disable=no-member
self.logger.info("proc %s step %s", self.gpu_id, ret)
if not ret and (self.poll_retries > 0 and self.poll_retries < 120):
pass
elif not ret and (self.poll_retries == 0 or self.poll_retries >= 120):
if self.poll_retries >= 120:
self.logger.warning(
                'Max poll retries number(120) reached, quitting...')
return False
          self.logger.warning('No more steps, quitting...')
return True
except KeyboardInterrupt as err:
self.logger.error('%s', err)
self.reset_job_state()
return False
return True | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/widget/README | -------------------------------------------------------------------------------
dojox.widget Collection
-------------------------------------------------------------------------------
Version 1.0
Release date: 10/31/2007
-------------------------------------------------------------------------------
Project state:
[Calendar] experimental
[CalendarFx] experimental
[ColorPicker] beta
[Dialog] experimental
[DialogSimple] beta
[FeedPortlet] experimental
[FilePicker] experimental
[FisheyeList] experimental
[FisheyeLite] beta
[Iterator] experimental
[Loader] experimental
[Pager] experimental
[Portlet] experimental
[PlaceholderMenuItem] experimental
[Roller] experimental
[RollingList] experimental
[SortList] experimental
[TitleGroup] beta
[Toaster] experimental
[Wizard] experimental
[AnalogGauge] experimental
[BarGauge] experimental
[Standby] experimental
-------------------------------------------------------------------------------
Credits:
[Calendar] Shane O'Sullivan
[CalendarFx] Shane O'Sullivan
[ColorPicker] Peter Higgins (dante)
[Dialog] Peter Higgins (dante)
[DialogSimple] Peter Higgins (dante)
[FeedPortlet] Shane O'Sullivan
[FilePicker] Nathan Toone (toonetown)
[FisheyeList] Karl Tiedt (kteidt)
[FisheyeLite] Peter Higgins (dante)
[Iterator] Alex Russell (slightlyoff)
[Loader] Peter Higgins (dante)
[Pager] Nikolai Onken (nonken), Peter Higgins (dante);
[PlaceholderMenuItem] Nathan Toone (toonetown)
[Portlet] Shane O'Sullivan
[Roller] Peter Higgins (dante)
[RollingList] Nathan Toone (toonetown)
[SortList] Peter Higgins (dante)
[TitleGroup] Peter Higgins (dante)
[Toaster] Adam Peller (peller)
[Wizard] Peter Higgins (dante)
[AnalogGauge] Benjamin Schell (bmschell) CCLA
[BarGauge] Benjamin Schell (bmschell) CCLA
[Standby] Jared Jurkiewicz (jaredj) CCLA
[UpgradeBar] Mike Wilcox (mwilcox), Revin Guillen
-------------------------------------------------------------------------------
Project description
This is a collection of standalone widgets for use in
your website. Each individual widget is independent
of the others.
-------------------------------------------------------------------------------
Dependencies:
	Each widget has its own requirements and dependencies.
Most inherit from dijit base-classes such as dijit._Widget,
	dijit._Templated, etc ... So we will assume the availability
of dojo (core), and dijit packages.
Each individual component stores resources in a folder that shares
a name with the Widget. For instance:
the Dialog lives in
dojox/widget/Dialog.js ...
and the folder:
dojox/widget/Dialog/ contains a 'Dialog.css', the required
styles for that particular widget. All required templates and
images reside in the folder.
This differs slightly from the rest of DojoX in that each other
project uses a shared resources/ folder in the project folder,
though uses the same naming convention for stylesheets and templates.
eg:
dojox/layout/resources/ExpandoPane.css
dojox.layout.ExpandoPane
-------------------------------------------------------------------------------
Documentation
Please refer to the API-tool, or in-line documentation. All of these
widgets are of varying use, quality, and documentation completion.
-------------------------------------------------------------------------------
Installation instructions
These are standalone Widgets, so putting the [widget].js file
in your dojox/widget folder, and copying any files in the
/dojox/widget/[widget]/ folder as supplements/templates/etc
should be all you need to do.
eg: FisheyeList:
/dojox/widget/FisheyeList.js
/dojox/widget/FisheyeList/FisheyeList.css
should be all you need to use the Fisheye widget.
you can safely import the whole widget project into your
dojox/ root directory from the following SVN url:
http://svn.dojotoolkit.org/src/dojox/trunk/widget
-------------------------------------------------------------------------------
Other Notes (Brief widget list):
* ColorPicker - An HSV ColorPicker intended to be a drop down
* Calendar - An extension on the dijit._Calendar providing a different UI
* CalendarFx - additional mixable FX for transitions in dojox.widget.Calendar
	* Dialog - An extended version of dijit.Dialog with many options and transitions.
* DialogSimple - A simple Dijit Dialog providing `dojox.layout.ContentPane` integration
* FilePicker - a widget for browsing server-side file systems (can use
dojox.data.FileStore as backend store)
* FisheyeList - the classic FishEye Picker (abandoned)
* FisheyeLite - A partial replacement for the FisheyeList - serious performance
gains, and entirely more extensible in that it simply animates defined
properties, relying on the natural styling as a foundation.
* Iterator - Basic array and data store iterator class
* Loader - an experimental Class that listens to XHR
connections in the background, and displays
a loading indicator. Loader will be removed in 1.3, and is (abandoned).
* PlaceholderMenuItem - a menu item that can be used to inject other menu
items at a given location. Extends dijit.Menu directly.
* Roller - A component to show many lines of text in a single area, rotating
through the options available. Also provides RollerSlide, an extension
to the stock fading roller to add a slide animation to the transition.
* RollingList - A component of the FilePicker widget
* SortList - a degradable UL with a fixed header, scrolling,
and sorting. Can be the direct descendant of a
LayoutContainer and will size to fit.
* TitleGroup - A container offering variable height TitlePane access, though
behaves like an AccordionContainer
* Toaster - a messaging system to display unobtrusive
alerts on screen.
* Wizard - a StackContainer with built-in navigation to
ease in the creation of 'step-based' content.
Requires dojo >= 1.1
* AnalogGauge - an analog style customizable gauge for displaying values in an
animated fashion and with multiple indicators. Supports easings for
indicator animations, transparent overlays, etc. Very flexible.
Requires dojo >= 1.3
* BarGauge - a bar style gauge for displaying values in an animated fashion
and with multiple indicators. Supports easings for indicator animations,
etc. Very flexible.
Requires dojo >= 1.3
* Standby - a 'blocker' style widget to overlay a translucent div + image over a DOM node/widget
to indicate busy. Overlay color, image, and alt text can all be customized.
Requires dojo >= 1.3
* UpgradeBar - Displays the "yellow bar" at the top of a page to indicate the user
needs to upgrade their browser or a plugin
Requires dojo >= 1.3 | PypiClean |
/Apywy-0.0.5.tar.gz/Apywy-0.0.5/apywy/static_funcs.py | from typing import Dict, List, Optional, Tuple, Union
from django.conf import settings
from django.urls.resolvers import URLPattern, URLResolver
from .domain.entities import NameSpace, View
BUILDIN_NAMESPACES_TO_IGNORE = ('apywy', 'admin')
USER_DECLARED_NAMESPACES_TO_IGNORE: Tuple = getattr(settings, 'NAMESPACES_TO_IGNORE', tuple())
def check_is_namespace_name_in_ignore(namespace_name: str) -> bool:
'''
    Check whether the namespace named namespace_name is in the apywy ignore list.
'''
if USER_DECLARED_NAMESPACES_TO_IGNORE == ('*', ):
return True
namespaces = BUILDIN_NAMESPACES_TO_IGNORE + USER_DECLARED_NAMESPACES_TO_IGNORE
return namespace_name in namespaces
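# Example (added comment, not in the original): with NAMESPACES_TO_IGNORE = ('shop',)
# declared in settings, both check_is_namespace_name_in_ignore('shop') and
# check_is_namespace_name_in_ignore('admin') return True (built-in namespaces are
# always ignored), while NAMESPACES_TO_IGNORE = ('*',) ignores every namespace.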
def get_all_urlpatterns() -> List[Union[URLPattern, URLResolver]]:
'''
    Get all urlpatterns of the project.
'''
from importlib import import_module
root_urlconf = import_module(settings.ROOT_URLCONF)
return root_urlconf.urlpatterns
def get_all_view_classes(urlpatterns: List[Union[URLPattern, URLResolver]]) -> List[View]:
'''
    For all the given urlpatterns, collect their view classes. Works recursively
    when a URLResolver is encountered.
'''
VIEW_CLASSES = []
    # with two different urls pointing to the same view the schema would be duplicated; we avoid that
already_added_django_views = set()
namespace: Optional[NameSpace] = None
_root: Optional[str] = None
def inner(urlpatterns: List[Union[URLPattern, URLResolver]]) -> List[View]:
nonlocal namespace, _root
for pattern in urlpatterns:
if isinstance(pattern, URLResolver):
namespace_name = str(pattern.namespace)
try:
_root = pattern.pattern._route
except AttributeError:
_root = None
if not check_is_namespace_name_in_ignore(namespace_name=namespace_name):
namespace = NameSpace(namespace_name=namespace_name)
inner(pattern.url_patterns)
elif isinstance(pattern, URLPattern):
try:
                    # we cannot handle func-based views, so we just skip them
django_view_class = pattern.callback.view_class
except AttributeError:
continue
path_to_view = pattern.pattern
path_to_view._root = _root
view_class = View(django_view_class=django_view_class, url_path=path_to_view)
if django_view_class in already_added_django_views:
view_class.append_url_path(url_path=path_to_view)
continue
already_added_django_views.add(django_view_class)
namespace.append(view_class) # type: ignore
VIEW_CLASSES.append(view_class)
return VIEW_CLASSES
return inner(urlpatterns)
def get_paths_data_of_view(view: View) -> List[Dict]:
'''
    Get the list of full paths and their names for all urls of the view.
'''
result = []
for url_path in view.url_paths:
url_path_data = {}
url_path_data['url_name'] = url_path.name
url_path_data['url_full_path'] = url_path._root + str(url_path)
result.append(url_path_data)
return result | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/snippets/vala.js | ace.define("ace/snippets/vala",["require","exports","module"], function(require, exports, module) {
"use strict";
exports.snippets = [
{
"content": "case ${1:condition}:\n\t$0\n\tbreak;\n",
"name": "case",
"scope": "vala",
"tabTrigger": "case"
},
{
"content": "/**\n * ${6}\n */\n${1:public} class ${2:MethodName}${3: : GLib.Object} {\n\n\t/**\n\t * ${7}\n\t */\n\tpublic ${2}(${4}) {\n\t\t${5}\n\t}\n\n\t$0\n}",
"name": "class",
"scope": "vala",
"tabTrigger": "class"
},
{
"content": "(${1}) => {\n\t${0}\n}\n",
"name": "closure",
"scope": "vala",
"tabTrigger": "=>"
},
{
"content": "/*\n * $0\n */",
"name": "Comment (multiline)",
"scope": "vala",
"tabTrigger": "/*"
},
{
"content": "Console.WriteLine($1);\n$0",
"name": "Console.WriteLine (writeline)",
"scope": "vala",
"tabTrigger": "writeline"
},
{
"content": "[DBus(name = \"$0\")]",
"name": "DBus annotation",
"scope": "vala",
"tabTrigger": "[DBus"
},
{
"content": "delegate ${1:void} ${2:DelegateName}($0);",
"name": "delegate",
"scope": "vala",
"tabTrigger": "delegate"
},
{
"content": "do {\n\t$0\n} while ($1);\n",
"name": "do while",
"scope": "vala",
"tabTrigger": "dowhile"
},
{
"content": "/**\n * $0\n */",
"name": "DocBlock",
"scope": "vala",
"tabTrigger": "/**"
},
{
"content": "else if ($1) {\n\t$0\n}\n",
"name": "else if (elseif)",
"scope": "vala",
"tabTrigger": "elseif"
},
{
"content": "else {\n\t$0\n}",
"name": "else",
"scope": "vala",
"tabTrigger": "else"
},
{
"content": "enum {$1:EnumName} {\n\t$0\n}",
"name": "enum",
"scope": "vala",
"tabTrigger": "enum"
},
{
"content": "public errordomain ${1:Error} {\n\t$0\n}",
"name": "error domain",
"scope": "vala",
"tabTrigger": "errordomain"
},
{
"content": "for ($1;$2;$3) {\n\t$0\n}",
"name": "for",
"scope": "vala",
"tabTrigger": "for"
},
{
"content": "foreach ($1 in $2) {\n\t$0\n}",
"name": "foreach",
"scope": "vala",
"tabTrigger": "foreach"
},
{
"content": "Gee.ArrayList<${1:G}>($0);",
"name": "Gee.ArrayList",
"scope": "vala",
"tabTrigger": "ArrayList"
},
{
"content": "Gee.HashMap<${1:K},${2:V}>($0);",
"name": "Gee.HashMap",
"scope": "vala",
"tabTrigger": "HashMap"
},
{
"content": "Gee.HashSet<${1:G}>($0);",
"name": "Gee.HashSet",
"scope": "vala",
"tabTrigger": "HashSet"
},
{
"content": "if ($1) {\n\t$0\n}",
"name": "if",
"scope": "vala",
"tabTrigger": "if"
},
{
"content": "interface ${1:InterfaceName}{$2: : SuperInterface} {\n\t$0\n}",
"name": "interface",
"scope": "vala",
"tabTrigger": "interface"
},
{
"content": "public static int main(string [] argv) {\n\t${0}\n\treturn 0;\n}",
"name": "Main function",
"scope": "vala",
"tabTrigger": "main"
},
{
"content": "namespace $1 {\n\t$0\n}\n",
"name": "namespace (ns)",
"scope": "vala",
"tabTrigger": "ns"
},
{
"content": "stdout.printf($0);",
"name": "printf",
"scope": "vala",
"tabTrigger": "printf"
},
{
"content": "${1:public} ${2:Type} ${3:Name} {\n\tset {\n\t\t$0\n\t}\n\tget {\n\n\t}\n}",
"name": "property (prop)",
"scope": "vala",
"tabTrigger": "prop"
},
{
"content": "${1:public} ${2:Type} ${3:Name} {\n\tget {\n\t\t$0\n\t}\n}",
"name": "read-only property (roprop)",
"scope": "vala",
"tabTrigger": "roprop"
},
{
"content": "@\"${1:\\$var}\"",
"name": "String template (@)",
"scope": "vala",
"tabTrigger": "@"
},
{
"content": "struct ${1:StructName} {\n\t$0\n}",
"name": "struct",
"scope": "vala",
"tabTrigger": "struct"
},
{
"content": "switch ($1) {\n\t$0\n}",
"name": "switch",
"scope": "vala",
"tabTrigger": "switch"
},
{
"content": "try {\n\t$2\n} catch (${1:Error} e) {\n\t$0\n}",
"name": "try/catch",
"scope": "vala",
"tabTrigger": "try"
},
{
"content": "\"\"\"$0\"\"\";",
"name": "Verbatim string (\"\"\")",
"scope": "vala",
"tabTrigger": "verbatim"
},
{
"content": "while ($1) {\n\t$0\n}",
"name": "while",
"scope": "vala",
"tabTrigger": "while"
}
];
exports.scope = "";
}); | PypiClean |
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/4.4/fs.js | var fs = {};
/**
* Asynchronous rename(2). No arguments other than a possible exception are
* given to the completion callback.
* @param oldPath
* @param newPath
* @param callback
*/
fs.rename = function(oldPath, newPath, callback) {}
/**
* Synchronous versions of [fs.write()][]. Returns the number of bytes
* written.
* @param fd
* @param data
* @param position
* @param encoding
* @returns {Number} the number of bytes written
*/
fs.writeSync = function(fd, data, position, encoding) {}
/**
* Synchronous versions of [fs.write()][]. Returns the number of bytes
* written.
* @param fd
* @param data
* @param position
* @param encoding
* @returns {Number} the number of bytes written
*/
fs.writeSync = function(fd, data, position, encoding) {}
/**
* WriteStream is a [Writable Stream][].
* @constructor
*/
fs.WriteStream = function() {}
/**
* The number of bytes written so far. Does not include data that is still
* queued for writing.
*/
fs.WriteStream.prototype.bytesWritten = 0;
/**
* The path to the file the stream is writing to.
*/
fs.WriteStream.prototype.path = 0;
/** @__local__ */ fs.WriteStream.__events__ = {};
/**
* Emitted when the WriteStream's file is opened.
* @param fd {Number}
*/
fs.WriteStream.__events__.open = function(fd) {};
/**
* Synchronous chmod(2). Returns undefined.
* @param path
* @param mode
* @returns undefined
*/
fs.chmodSync = function(path, mode) {}
/**
* Objects returned from [fs.stat()][], [fs.lstat()][] and [fs.fstat()][]
* and their synchronous counterparts are of this type.
* @constructor
*/
fs.Stats = function() {}
/**
* Asynchronous chmod(2). No arguments other than a possible exception are
* given to the completion callback.
* @param path
* @param mode
* @param callback
*/
fs.chmod = function(path, mode, callback) {}
/**
* Synchronous readdir(3). Returns an array of filenames excluding
* '.' and '..'.
* @param path
* @returns {Array} an array of filenames excluding '.' and '..'
*/
fs.readdirSync = function(path) {}
/**
* Synchronous readlink(2). Returns the symbolic link's string value.
* @param path
* @returns {String} the symbolic link's string value
*/
fs.readlinkSync = function(path) {}
/**
* Synchronous close(2). Returns undefined.
* @param fd
* @returns undefined
*/
fs.closeSync = function(fd) {}
/**
* Asynchronous close(2). No arguments other than a possible exception are
* given to the completion callback.
* @param fd
* @param callback
*/
fs.close = function(fd, callback) {}
/**
* Asynchronous file open. See open(2). flags can be:
* @param path
* @param flags
* @param mode
* @param callback
*/
fs.open = function(path, flags, mode, callback) {}
/**
* Synchronous lstat(2). Returns an instance of fs.Stats.
* @param path
* @returns {fs.Stats} an instance of fs.Stats
*/
fs.lstatSync = function(path) {}
/**
* Synchronous link(2). Returns undefined.
* @param srcpath
* @param dstpath
* @returns undefined
*/
fs.linkSync = function(srcpath, dstpath) {}
/**
* Synchronous stat(2). Returns an instance of [fs.Stats][].
* @param path
* @returns {fs.Stats} an instance of fs.Stats
*/
fs.statSync = function(path) {}
/**
* Asynchronous mkdir(2). No arguments other than a possible exception are
* given to the completion callback. mode defaults to 0o777.
* @param path
* @param mode=0o777 {Number}
* @param callback
*/
fs.mkdir = function(path, mode, callback) {}
/**
* Asynchronously reads the entire contents of a file. Example:
* @param file {String | Integer}
* @param options {Object | String}
* @param callback {Function}
*/
fs.readFile = function(file, options, callback) {}
/**
* Write buffer to the file specified by fd.
* @param fd
* @param buffer
* @param offset
* @param length
* @param position
* @param callback
*/
fs.write = function(fd, buffer, offset, length, position, callback) {}
/**
* Write data to the file specified by fd. If data is not a Buffer instance
* then the value will be coerced to a string.
* @param fd
* @param data
* @param position
* @param encoding
* @param callback
*/
fs.write = function(fd, data, position, encoding, callback) {}
/**
* Synchronous realpath(2). Returns the resolved path. cache is an object
* literal of mapped paths that can be used to force a specific path
* resolution or avoid additional fs.stat calls for known real paths.
* @param path
* @param cache
* @returns the resolved path
*/
fs.realpathSync = function(path, cache) {}
/**
* Asynchronously writes data to a file, replacing the file if it already
* exists.
* @param file {String | Integer}
* @param data {String | Buffer}
* @param options {Object | String}
* @param callback {Function}
*/
fs.writeFile = function(file, data, options, callback) {}
/**
* Asynchronous rmdir(2). No arguments other than a possible exception are
* given to the completion callback.
* @param path
* @param callback
*/
fs.rmdir = function(path, callback) {}
/**
* Stop watching for changes on filename. If listener is specified, only
* that particular listener is removed. Otherwise, all listeners are
* removed and you have effectively stopped watching filename.
* @param filename
* @param listener
*/
fs.unwatchFile = function(filename, listener) {}
/**
* Asynchronous fstat(2). The callback gets two arguments (err, stats)
* where stats is a fs.Stats object. fstat() is identical to [stat()][],
* except that the file to be stat-ed is specified by the file descriptor
* fd.
* @param fd
* @param callback
*/
fs.fstat = function(fd, callback) {}
/**
* ReadStream is a [Readable Stream][].
* @constructor
*/
fs.ReadStream = function() {}
/**
* The path to the file the stream is reading from.
*/
fs.ReadStream.prototype.path = 0;
/** @__local__ */ fs.ReadStream.__events__ = {};
/**
* Emitted when the ReadStream's file is opened.
* @param fd {Number}
*/
fs.ReadStream.__events__.open = function(fd) {};
/**
* Asynchronous realpath(2). The callback gets two arguments (err,
* resolvedPath). May use process.cwd to resolve relative paths. cache is
* an object literal of mapped paths that can be used to force a specific
* path resolution or avoid additional fs.stat calls for known real paths.
* @param path
* @param cache
* @param callback
*/
fs.realpath = function(path, cache, callback) {}
/**
* Asynchronous stat(2). The callback gets two arguments (err, stats) where
* stats is a [fs.Stats][] object. See the [fs.Stats][] section for more
* information.
* @param path
* @param callback
*/
fs.stat = function(path, callback) {}
/**
* Synchronous version of [fs.read()][]. Returns the number of bytesRead.
* @param fd
* @param buffer
* @param offset
* @param length
* @param position
* @returns {Number} the number of bytesRead
*/
fs.readSync = function(fd, buffer, offset, length, position) {}
/**
* Asynchronous truncate(2). No arguments other than a possible exception
* are given to the completion callback. A file descriptor can also be
* passed as the first argument. In this case, fs.ftruncate() is called.
* @param path
* @param len
* @param callback
*/
fs.truncate = function(path, len, callback) {}
/**
* Asynchronous lstat(2). The callback gets two arguments (err, stats)
* where stats is a fs.Stats object. lstat() is identical to stat(), except
* that if path is a symbolic link, then the link itself is stat-ed, not
* the file that it refers to.
* @param path
* @param callback
*/
fs.lstat = function(path, callback) {}
/**
* Synchronous fstat(2). Returns an instance of fs.Stats.
* @param fd
* @returns {fs.Stats} an instance of fs.Stats
*/
fs.fstatSync = function(fd) {}
/**
* The synchronous version of [fs.writeFile()][]. Returns undefined.
* @param file
* @param data
* @param options
* @returns undefined
*/
fs.writeFileSync = function(file, data, options) {}
/**
* Asynchronous symlink(2). No arguments other than a possible exception
* are given to the completion callback.
* @param target
* @param path
* @param type
* @param callback
*/
fs.symlink = function(target, path, type, callback) {}
/**
* Synchronous symlink(2). Returns undefined.
* @param target
* @param path
* @param type
* @returns undefined
*/
fs.symlinkSync = function(target, path, type) {}
/**
* Synchronous rmdir(2). Returns undefined.
* @param path
* @returns undefined
*/
fs.rmdirSync = function(path) {}
/**
* Asynchronous link(2). No arguments other than a possible exception are
* given to the completion callback.
* @param srcpath
* @param dstpath
* @param callback
*/
fs.link = function(srcpath, dstpath, callback) {}
/**
* Asynchronous readdir(3). Reads the contents of a directory.
* @param path
* @param callback
*/
fs.readdir = function(path, callback) {}
/**
* Returns a new [ReadStream][] object. (See [Readable Stream][]).
* @param path
* @param options
* @returns {stream.ReadableStream}
*/
fs.createReadStream = function(path, options) {}
/**
* Synchronous version of [fs.readFile][]. Returns the contents of the
* file.
* @param file
* @param options
* @returns the contents of the file
*/
fs.readFileSync = function(file, options) {}
/**
* Asynchronous unlink(2). No arguments other than a possible exception are
* given to the completion callback.
* @param path
* @param callback
*/
fs.unlink = function(path, callback) {}
/**
* Synchronous truncate(2). Returns undefined.
* @param path
* @param len
* @returns undefined
*/
fs.truncateSync = function(path, len) {}
/**
* Read data from the file specified by fd.
* @param fd
* @param buffer
* @param offset
* @param length
* @param position
* @param callback
*/
fs.read = function(fd, buffer, offset, length, position, callback) {}
/**
* Synchronous rename(2). Returns undefined.
* @param oldPath
* @param newPath
* @returns undefined
*/
fs.renameSync = function(oldPath, newPath) {}
/**
* Synchronous mkdir(2). Returns undefined.
* @param path
* @param mode
* @returns undefined
*/
fs.mkdirSync = function(path, mode) {}
/**
* Watch for changes on filename. The callback listener will be called each
* time the file is accessed.
* @param filename
* @param options
* @param listener
*/
fs.watchFile = function(filename, options, listener) {}
/**
* Returns a new [WriteStream][] object. (See [Writable Stream][]).
* @param path
* @param options
* @returns {stream.WritableStream}
*/
fs.createWriteStream = function(path, options) {}
/**
* Synchronous version of [fs.open()][]. Returns an integer representing
* the file descriptor.
* @param path
* @param flags
* @param mode
* @returns an integer representing the file descriptor
*/
fs.openSync = function(path, flags, mode) {}
/**
* Asynchronous readlink(2). The callback gets two arguments (err,
* linkString).
* @param path
* @param callback
*/
fs.readlink = function(path, callback) {}
/**
* Synchronous unlink(2). Returns undefined.
* @param path
* @returns undefined
*/
fs.unlinkSync = function(path) {}
/**
* Tests a user's permissions for the file specified by path. mode is
* an optional integer that specifies the accessibility checks to be
* performed. The following constants define the possible values of mode.
* It is possible to create a mask consisting of the bitwise OR of two or
* more values.
* @param path
* @param mode
* @param callback
*/
fs.access = function(path, mode, callback) {}
/**
* Synchronous version of [fs.access()][]. This throws if any accessibility
* checks fail, and does nothing otherwise.
* @param path
* @param mode
*/
fs.accessSync = function(path, mode) {}
/**
* Asynchronously append data to a file, creating the file if it does not
* yet exist.
* @param file {String|Number}
* @param data {String|Buffer}
* @param options {Object|String}
* @param callback {Function}
*/
fs.appendFile = function(file, data, options, callback) {}
/**
* The synchronous version of [fs.appendFile()][]. Returns undefined.
* @param file
* @param data
* @param options
* @returns undefined
*/
fs.appendFileSync = function(file, data, options) {}
/**
* Asynchronous chown(2). No arguments other than a possible exception are
* given to the completion callback.
* @param path
* @param uid
* @param gid
* @param callback
*/
fs.chown = function(path, uid, gid, callback) {}
/**
* Synchronous chown(2). Returns undefined.
* @param path
* @param uid
* @param gid
* @returns undefined
*/
fs.chownSync = function(path, uid, gid) {}
/**
* Test whether or not the given path exists by checking with the file
* system.
* @param path
* @param callback
*/
fs.exists = function(path, callback) {}
/**
* Synchronous version of [fs.exists()][].
* @param path
*/
fs.existsSync = function(path) {}
/**
* Asynchronous fchmod(2). No arguments other than a possible exception are
* given to the completion callback.
* @param fd
* @param mode
* @param callback
*/
fs.fchmod = function(fd, mode, callback) {}
/**
* Synchronous fchmod(2). Returns undefined.
* @param fd
* @param mode
* @returns undefined
*/
fs.fchmodSync = function(fd, mode) {}
/**
* Asynchronous fchown(2). No arguments other than a possible exception are
* given to the completion callback.
* @param fd
* @param uid
* @param gid
* @param callback
*/
fs.fchown = function(fd, uid, gid, callback) {}
/**
* Synchronous fchown(2). Returns undefined.
* @param fd
* @param uid
* @param gid
* @returns undefined
*/
fs.fchownSync = function(fd, uid, gid) {}
/**
* Objects returned from fs.watch() are of this type.
* @constructor
*/
fs.FSWatcher = function() {}
fs.FSWatcher.prototype = new events.EventEmitter();
/**
* Stop watching for changes on the given fs.FSWatcher.
*/
fs.FSWatcher.prototype.close = function() {}
/** @__local__ */ fs.FSWatcher.__events__ = {};
/**
* Emitted when something changes in a watched directory or file. See more
* details in [fs.watch()][].
* @param event {String}
* @param filename {String}
*/
fs.FSWatcher.__events__.change = function(event, filename) {};
/**
* Emitted when an error occurs.
* @param exception {Error}
*/
fs.FSWatcher.__events__.error = function(exception) {};
/**
* Asynchronous fsync(2). No arguments other than a possible exception are
* given to the completion callback.
* @param fd
* @param callback
*/
fs.fsync = function(fd, callback) {}
/**
* Synchronous fsync(2). Returns undefined.
* @param fd
* @returns undefined
*/
fs.fsyncSync = function(fd) {}
/**
* Asynchronous ftruncate(2). No arguments other than a possible exception
* are given to the completion callback.
* @param fd
* @param len
* @param callback
*/
fs.ftruncate = function(fd, len, callback) {}
/**
* Synchronous ftruncate(2). Returns undefined.
* @param fd
* @param len
* @returns undefined
*/
fs.ftruncateSync = function(fd, len) {}
/**
* Change the file timestamps of a file referenced by the supplied file
* descriptor.
* @param fd
* @param atime
* @param mtime
* @param callback
*/
fs.futimes = function(fd, atime, mtime, callback) {}
/**
* Synchronous version of [fs.futimes()][]. Returns undefined.
* @param fd
* @param atime
* @param mtime
* @returns undefined
*/
fs.futimesSync = function(fd, atime, mtime) {}
/**
* Asynchronous lchmod(2). No arguments other than a possible exception are
* given to the completion callback.
* @param path
* @param mode
* @param callback
*/
fs.lchmod = function(path, mode, callback) {}
/**
* Synchronous lchmod(2). Returns undefined.
* @param path
* @param mode
* @returns undefined
*/
fs.lchmodSync = function(path, mode) {}
/**
* Asynchronous lchown(2). No arguments other than a possible exception are
* given to the completion callback.
* @param path
* @param uid
* @param gid
* @param callback
*/
fs.lchown = function(path, uid, gid, callback) {}
/**
* Synchronous lchown(2). Returns undefined.
* @param path
* @param uid
* @param gid
* @returns undefined
*/
fs.lchownSync = function(path, uid, gid) {}
/**
* Change file timestamps of the file referenced by the supplied path.
* @param path
* @param atime
* @param mtime
* @param callback
*/
fs.utimes = function(path, atime, mtime, callback) {}
/**
* Synchronous version of [fs.utimes()][]. Returns undefined.
* @param path
* @param atime
* @param mtime
* @returns undefined
*/
fs.utimesSync = function(path, atime, mtime) {}
/**
* Watch for changes on filename, where filename is either a file or a
* directory. The returned object is a [fs.FSWatcher][].
* @param filename
* @param options
* @param listener
* @returns {fs.FSWatcher}
*/
fs.watch = function(filename, options, listener) {}
/* see http://nodejs.org/docs/v0.6.12/api/fs.html#fs.Stats */
fs.Stats.prototype = {
isFile: function() {},
isDirectory: function() {},
isBlockDevice: function() {},
isCharacterDevice: function() {},
isSymbolicLink: function() {},
isFIFO: function() {},
isSocket: function() {},
};
/* required for createReadStream() / createWriteStream() */
var stream = require('stream');
var events = require('events');
exports = fs; | PypiClean |
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/diag_scripts/ocean/diagnostic_maps_quad.py | import logging
import os
import sys
import iris
import iris.quickplot as qplt
import matplotlib.pyplot as plt
import numpy as np
from esmvaltool.diag_scripts.ocean import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
from esmvaltool.diag_scripts.shared import ProvenanceLogger
# This part sends debug statements to stdout
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def add_map_subplot(subplot, cube, nspace, title='', cmap=''):
"""
Add a map subplot to the current pyplot figure.
Parameters
----------
subplot: int
The matplotlib.pyplot subplot number. (ie 221)
cube: iris.cube.Cube
the iris cube to be plotted.
nspace: numpy.array
An array of the ticks of the colour bar.
title: str
A string to set as the subplot title.
cmap: str
A string to describe the matplotlib colour map.
"""
plt.subplot(subplot)
qplot = qplt.contourf(cube, nspace, linewidth=0,
cmap=plt.cm.get_cmap(cmap))
qplot.colorbar.set_ticks([nspace.min(),
(nspace.max() + nspace.min()) / 2.,
nspace.max()])
plt.gca().coastlines()
plt.title(title)
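# Illustrative sketch only (assumed data, not part of the original diagnostic):
# add_map_subplot() expects an iris cube on a lat/lon grid plus the contour
# levels, so a typical call inside a figure looks like:
#
#     fig = plt.figure()
#     levels = np.linspace(cube.data.min(), cube.data.max(), 12, endpoint=True)
#     add_map_subplot(111, cube, levels, title='Example field', cmap='viridis')
#     plt.savefig('example_map.png')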
def multi_model_maps(
cfg,
input_files,
):
"""
Make the four pane model vs model vs obs comparison plot.
Parameters
----------
cfg: dict
the opened global config dictionary, passed by ESMValTool.
input_files: dict
the metadata dictionary
"""
filenames = {}
ctl_key = 'control_model'
exp_key = 'exper_model'
obs_key = 'observational_dataset'
model_types = [ctl_key, exp_key, obs_key]
for model_type in model_types:
logger.debug('%s: %s', model_type, cfg[model_type])
filenames[model_type] = diagtools.match_model_to_key(model_type,
cfg[model_type],
input_files)
# ####
# Load the data for each layer as a separate cube
layers = {}
cubes = {}
for model_type, input_file in filenames.items():
cube = iris.load_cube(input_file)
cube = diagtools.bgc_units(cube, input_files[input_file]['short_name'])
cubes[model_type] = diagtools.make_cube_layer_dict(cube)
for layer in cubes[model_type]:
layers[layer] = True
logger.debug('layers: %s', ', '.join(layers))
logger.debug('cubes: %s', ', '.join(cubes.keys()))
# ####
# load names:
exper = input_files[filenames[exp_key]]['dataset']
control = input_files[filenames[ctl_key]]['dataset']
obs = input_files[filenames[obs_key]]['dataset']
long_name = cubes[exp_key][list(layers.keys())[0]].long_name
# Load image format extension
image_extention = diagtools.get_image_format(cfg)
# Make a plot for each layer
for layer in layers:
fig = plt.figure()
fig.set_size_inches(9, 6)
# Create the cubes
cube221 = cubes[exp_key][layer]
cube222 = cubes[exp_key][layer] - cubes[ctl_key][layer]
cube223 = cubes[ctl_key][layer] - cubes[obs_key][layer]
cube224 = cubes[exp_key][layer] - cubes[obs_key][layer]
# create the z axis for plots 2, 3, 4.
zrange1 = diagtools.get_cube_range([cube221, ])
zrange2 = diagtools.get_cube_range_diff([cube222, cube223, cube224])
linspace1 = np.linspace(zrange1[0], zrange1[1], 12, endpoint=True)
linspace2 = np.linspace(zrange2[0], zrange2[1], 12, endpoint=True)
# Add the sub plots to the figure.
add_map_subplot(221, cube221, linspace1, cmap='viridis', title=exper)
add_map_subplot(222, cube222, linspace2, cmap='bwr',
title=' '.join([exper, 'minus', control]))
add_map_subplot(223, cube223, linspace2, cmap='bwr',
title=' '.join([control, 'minus', obs]))
add_map_subplot(224, cube224, linspace2, cmap='bwr',
title=' '.join([exper, 'minus', obs]))
# Add overall title
fig.suptitle(long_name, fontsize=14)
# Determine image filename:
fn_list = [long_name, exper, control, obs, str(layer)]
path = diagtools.folder(cfg['plot_dir']) + '_'.join(fn_list)
path = path.replace(' ', '') + image_extention
# Saving files:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
provenance_record = diagtools.prepare_provenance_record(
cfg,
caption=f'Quadmap models comparison against {obs}',
statistics=['mean', 'diff', ],
domain=['global'],
plot_type=['map'],
ancestors=list(input_files.keys()),
)
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(path, provenance_record)
def main(cfg):
"""
Load the config file, and send it to the plot maker.
Parameters
----------
cfg: dict
the opened global config dictionary, passed by ESMValTool.
"""
for index, metadata_filename in enumerate(cfg['input_files']):
logger.info(
'metadata filename:\t%s',
metadata_filename,
)
input_files = diagtools.get_input_files(cfg, index=index)
# #####
# Multi model time series
multi_model_maps(
cfg,
input_files,
)
logger.info('Success')
if __name__ == '__main__':
with run_diagnostic() as config:
main(config) | PypiClean |
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/cloudferry/lib/stage.py | import abc
import logging
from oslo_utils import importutils
from cloudferry.lib.utils import local_db
LOG = logging.getLogger(__name__)
local_db.execute_once("""
CREATE TABLE IF NOT EXISTS stages (
stage TEXT,
signature JSON,
PRIMARY KEY (stage)
)
""")
class Stage(object):
__metaclass__ = abc.ABCMeta
dependencies = []
def __init__(self, config):
"""
Stage constructor
:param config: cloudferry.lib.config.Configuration instance
:return:
"""
self.config = config
@abc.abstractmethod
def signature(self):
"""
Returns the signature for the data that will be produced during this stage.
If the signature differs from the one stored in the database, the
invalidate method will be called.
:return:
"""
return
@abc.abstractmethod
def execute(self):
"""
Should contain any code that is required to be executed during this
stage.
"""
return
@abc.abstractmethod
def invalidate(self, old_signature, new_signature, force=False):
"""
Should destroy any stale data based on signature difference.
:param old_signature: old signature stored in DB
:param new_signature: new signature
:param force: when True, invalidate even if the signatures match
"""
return
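# Illustrative sketch only (not part of CloudFerry): a minimal Stage subclass
# showing how signature(), execute() and invalidate() fit together.
class _ExampleNoopStage(Stage):
    dependencies = []

    def signature(self):
        # Any JSON-serialisable value works; changing it triggers invalidate().
        return {'version': 1}

    def execute(self):
        LOG.debug('Executing example no-op stage')

    def invalidate(self, old_signature, new_signature, force=False):
        LOG.debug('Dropping stale data: %s -> %s', old_signature, new_signature)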
def execute_stage(class_name, config, force=False):
"""
Execute stage specified by `class_name` argument.
:param class_name: fully qualified stage class name
:param config: config.Configuration instance
:param force: when True, re-run invalidation and execution even if the
    stored signature matches
"""
# Create stage object
cls = importutils.import_class(class_name)
assert issubclass(cls, Stage)
stage = cls(config)
# Execute dependency stages
for dependency in stage.dependencies:
execute_stage(dependency, config)
# Check if there is data from this stage in local DB
new_signature = stage.signature()
old_signature = None
need_invalidate = False
need_execute = False
with local_db.Transaction() as tx:
row = tx.query_one('SELECT signature FROM stages WHERE stage=:stage',
stage=class_name)
if row is None:
need_execute = True
else:
old_signature = row['signature'].data
need_invalidate = (old_signature != new_signature)
# Run invalidate and execute if needed
with local_db.Transaction() as tx:
if need_invalidate or force:
stage.invalidate(old_signature, new_signature, force=force)
tx.execute('DELETE FROM stages WHERE stage=:stage',
stage=class_name)
need_execute = True
if need_execute:
stage.execute()
tx.execute('INSERT INTO stages VALUES (:stage, :signature)',
stage=class_name,
signature=local_db.Json(new_signature))
LOG.info('Stage %s executed successfully', class_name)
else:
LOG.info('Skipping stage %s', class_name) | PypiClean |
/CloudFerry-1.55.2.tar.gz/CloudFerry-1.55.2/cloudferry/lib/os/discovery/nova.py | import logging
from novaclient import exceptions
from cloudferry import discover
from cloudferry import model
from cloudferry.model import compute
from cloudferry.model import identity
from cloudferry.model import image
from cloudferry.model import storage
from cloudferry.lib.utils import qemu_img
from cloudferry.lib.utils import remote
from cloudferry.lib.os import clients
from cloudferry.lib.os import cloud_db
EXT_ATTR_INSTANCE_NAME = 'OS-EXT-SRV-ATTR:instance_name'
EXT_ATTR_HYPER_HOST = 'OS-EXT-SRV-ATTR:hypervisor_hostname'
EXT_ATTR_AZ = 'OS-EXT-AZ:availability_zone'
EXT_ATTR_HOST = 'OS-EXT-SRV-ATTR:host'
EXT_ATTR_VOL_ATTACHMENTS = 'os-extended-volumes:volumes_attached'
LOG = logging.getLogger(__name__)
class FlavorDiscoverer(discover.Discoverer):
discovered_class = compute.Flavor
BASE_QUERY = '''
SELECT
`id` AS `object_id`,
`flavorid` AS `flavor_id`,
SIGN(`deleted`) AS `is_deleted`,
`disabled` AS `is_disabled`,
`is_public`, `name`, `vcpus`, `memory_mb`, `root_gb`, `ephemeral_gb`,
`swap` AS `swap_mb`,
`vcpu_weight`, `rxtx_factor`
FROM instance_types
'''
EXTRA_SPECS_QUERY = '''
SELECT `key`, `value` FROM `instance_type_extra_specs`
WHERE `deleted` = 0 AND `instance_type_id` = %(id)s
'''
def discover_all(self):
with cloud_db.connection(self.cloud.nova_db) as db, \
model.Session() as session:
flavor_rows = db.query(self.BASE_QUERY + 'WHERE `deleted` = 0')
for flavor_data in flavor_rows:
self._populate_extra(db, flavor_data)
session.store(self.load_from_cloud(flavor_data))
def discover_one(self, internal_id):
with cloud_db.connection(self.cloud.nova_db) as db, \
model.Session() as session:
flavor_data = db.query_one(self.BASE_QUERY + 'WHERE `id` = %(id)s',
id=internal_id)
self._populate_extra(db, flavor_data)
flavor = self.load_from_cloud(flavor_data)
session.store(flavor)
return flavor
def discover_by_flavor_id(self, flavor_id):
with cloud_db.connection(self.cloud.nova_db) as db, \
model.Session() as session:
flavor_data = db.query_one(
self.BASE_QUERY +
'WHERE `flavorid` = %(flavor_id)s AND `deleted` = 0',
flavor_id=flavor_id)
self._populate_extra(db, flavor_data)
flavor = self.load_from_cloud(flavor_data)
session.store(flavor)
return flavor
def load_from_cloud(self, raw_data):
data = dict(raw_data)
data['object_id'] = self.make_id(data['object_id'])
return compute.Flavor.load(data)
def _populate_extra(self, db, flavor_data):
extra_specs = {}
for key, value in db.query(self.EXTRA_SPECS_QUERY,
id=flavor_data['object_id']):
extra_specs[key] = value
flavor_data['extra_specs'] = extra_specs
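# Illustrative sketch only (assumed driver code, not part of CloudFerry): a
# discoverer is constructed with a cloud description and then asked to
# discover everything or a single object, e.g.:
#
#     discoverer = FlavorDiscoverer(cloud)
#     discoverer.discover_all()                    # store all flavors in the local model DB
#     flavor = discoverer.discover_by_flavor_id('42')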
class ServerDiscoverer(discover.Discoverer):
discovered_class = compute.Server
def discover_all(self):
compute_client = clients.compute_client(self.cloud)
avail_hosts = self._list_available_compute_hosts(compute_client)
servers = {}
# Go through each tenant since nova don't return more items than
# specified in osapi_max_limit configuration option (1000 by default)
# in single API call
for tenant in self._get_tenants():
LOG.debug('Discovering servers from cloud "%s" tenant "%s"',
self.cloud.name, tenant.name)
tenant_id = tenant.id
raw_server_list = self.retry(compute_client.servers.list,
search_opts={
'all_tenants': True,
'tenant_id': tenant_id,
},
returns_iterable=True)
for raw_server in raw_server_list:
host = getattr(raw_server, EXT_ATTR_HOST)
if host not in avail_hosts:
LOG.warning('Skipping server %s in tenant %s, host not '
'available.', raw_server.id, tenant.name)
continue
# Convert server data to model conforming format
server = self.load_from_cloud(raw_server)
hyper_host = getattr(raw_server, EXT_ATTR_HYPER_HOST)
servers.setdefault(hyper_host, []).append(server)
# Collect information about ephemeral disks
# TODO: work with different servers in parallel
for host, host_servers in servers.items():
LOG.debug('Getting ephemeral disks information from cloud %s '
'host %s', self.cloud.name, host)
with remote.RemoteExecutor(self.cloud, host) as remote_executor:
for server in host_servers:
_populate_ephemeral_disks(remote_executor, server)
# Store data to local database
with model.Session() as session:
for host_servers in servers.values():
for server in host_servers:
session.store(server)
if _need_image_membership(server):
image_member_uuid = image.ImageMember.make_uuid(
server.image, server.tenant)
server.image_membership = self.find_obj(
image.ImageMember, image_member_uuid)
def discover_one(self, uuid):
compute_client = clients.compute_client(self.cloud)
try:
raw_server = self.retry(compute_client.servers.get, uuid,
expected_exceptions=[exceptions.NotFound])
except exceptions.NotFound:
raise discover.NotFound()
# Check if server host is available
avail_hosts = self._list_available_compute_hosts(compute_client)
host = getattr(raw_server, EXT_ATTR_HOST)
if host not in avail_hosts:
LOG.warning('Skipping server %s, host %s not available.',
uuid, host)
return None
# Convert server data to model conforming format
server = self.load_from_cloud(raw_server)
with remote.RemoteExecutor(
self.cloud, server.hypervisor_host) as remote_executor:
_populate_ephemeral_disks(remote_executor, server)
# Store server
with model.Session() as session:
session.store(server)
if _need_image_membership(server):
image_member_uuid = image.ImageMember.make_uuid(
server.image, server.tenant)
server.image_membership = self.find_obj(
image.ImageMember, image_member_uuid)
return server
def load_from_cloud(self, data):
compute_client = clients.compute_client(self.cloud)
# Workaround for grizzly lacking EXT_ATTR_VOL_ATTACHMENTS
if hasattr(data, EXT_ATTR_VOL_ATTACHMENTS):
raw_attachments = [
'{0}:{1}'.format(data.id, attachment['id'])
for attachment in
getattr(data, EXT_ATTR_VOL_ATTACHMENTS)]
else:
raw_attachments = [
'{0}:{1}'.format(attachment.serverId, attachment.volumeId)
for attachment in
self.retry(compute_client.volumes.get_server_volumes, data.id,
returns_iterable=True)]
server_image = None
if data.image:
server_image = data.image['id']
attached_volumes = [self.find_ref(storage.Attachment, attachment)
for attachment in raw_attachments]
with cloud_db.connection(self.cloud.nova_db) as db:
flavor_id = self._get_flavor(db, data.id)
hypervisor_host = getattr(data, EXT_ATTR_HYPER_HOST)
server_dict = {
'object_id': self.make_id(data.id),
'security_groups': [], # TODO: implement security groups
'tenant': self.find_ref(identity.Tenant, data.tenant_id),
'image': self.find_ref(image.Image, server_image),
'image_membership': None,
'flavor': self.find_ref(compute.Flavor, flavor_id),
'availability_zone': getattr(data, EXT_ATTR_AZ),
'host': getattr(data, EXT_ATTR_HOST),
'hypervisor_hostname': hypervisor_host,
'instance_name': getattr(data, EXT_ATTR_INSTANCE_NAME),
'attached_volumes': [av for av in attached_volumes if av],
'ephemeral_disks': [], # Ephemeral disks will be filled later
'compute_node': self.find_ref(compute.ComputeNode,
hypervisor_host),
}
for attr_name in ('name', 'status', 'user_id', 'key_name',
'config_drive', 'metadata'):
if hasattr(data, attr_name):
server_dict[attr_name] = getattr(data, attr_name)
return compute.Server.load(server_dict)
def _get_tenants(self):
identity_client = clients.identity_client(self.cloud)
return identity_client.tenants.list()
def _list_available_compute_hosts(self, compute_client):
return set(c.host
for c in self.retry(compute_client.services.list,
binary='nova-compute',
returns_iterable=True)
if c.state == 'up' and c.status == 'enabled')
@staticmethod
def _get_flavor(nova_db, server_id):
data = nova_db.query_one(
'SELECT instance_type_id AS flavor_id '
'FROM instances WHERE uuid = %(uuid)s',
uuid=server_id)
assert data is not None, 'Flavor id for server not found. Most ' \
'probably database configuration is incorrect'
return data['flavor_id']
def _populate_ephemeral_disks(rmt_exec, server):
try:
output = rmt_exec.sudo('virsh domblklist {instance}',
instance=server.instance_name)
except remote.RemoteFailure:
LOG.error('Unable to get ephemeral disks for server %s, skipping.',
server, exc_info=True)
return
volume_targets = set()
for attachment in server.attached_volumes:
volume_targets.add(attachment.device.replace('/dev/', ''))
for line in output.splitlines():
split = line.split(None, 1)
if len(split) != 2:
continue
target, path = split
if target in volume_targets or not path.startswith('/'):
continue
size, base_path, format = _get_disk_info(rmt_exec, path)
if base_path is not None:
base_size, _, base_format = _get_disk_info(rmt_exec, base_path)
else:
base_size = base_format = None
if size is not None:
eph_disk = compute.EphemeralDisk.load({
'path': path,
'size': size,
'format': format,
'base_path': base_path,
'base_size': base_size,
'base_format': base_format,
})
server.ephemeral_disks.append(eph_disk)
def _need_image_membership(srv):
img = srv.image
if img is None:
return False
if img.is_public:
return False
return img.tenant != srv.tenant
def _get_disk_info(remote_executor, path):
try:
size_str = remote_executor.sudo('stat -c %s {path}', path=path)
except remote.RemoteFailure:
LOG.warning('Unable to get size of "%s", skipping disk.', path)
LOG.debug('Unable to get size of "%s", skipping disk.', path,
exc_info=True)
return None, None, None
disk_info = qemu_img.get_disk_info(remote_executor, path)
return int(size_str.strip()), disk_info.backing_filename, disk_info.format | PypiClean |
/ApiRequestManager-1.0.5-py3-none-any.whl/src/Config.py | class Config:
"""Config Object For an Api
Object that store Api configurations that will be needed
to execute requests
Args:
name(String: Required):
string to reference a Config api object
don't call 2 api with the same name or
an api config will be delete
base_url(String: Required):
url common part for all your requests with this api
ex
"https://api" will allow to create requests like
--> "https://api/firstpath"
--> "https://api/secondpath"
--> "https://api/thirdpath"
auth(Map: Optional):
if you need authentication for the api,
provide the authentication header field
(ex: Authorization) and the token
like
auth -> {'the auth field here': 'Your token here'}
headers(Map: Optional):
if you need to provide other headers to api
do it like 'auth' argument (multiple header key/value accepted)
ex
header -> {
'first_header_field':'first_header_val',
'second_header_field':'second_header_val',
etc...
}
"""
name: str
base_url: str
auth: dict
headers: dict
def __init__(self, name: str, base_url: str, auth: dict = None, headers: dict = None):
self.name = name
self.base_url = base_url
self.auth = auth
self.headers = headers
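    # Minimal usage sketch (hypothetical values): build a Config for some API;
    # only `name` and `base_url` are required.
    #
    #     github = Config(
    #         name="github",
    #         base_url="https://api.github.com",
    #         auth={"Authorization": "token <your token here>"},
    #         headers={"Accept": "application/vnd.github+json"},
    #     )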
def __eq__(self, other):
""" '==' operator implemented: same 'name' attribut -> equality """
return self.name == other.name
def __repr__(self):
return "Config(name=%r, base_url=%r, auth=%r, headers=%r)" % (self.name, self.base_url, self.auth, self.headers)
def __hash__(self):
"""
hash implemented like: same 'name' attribut -> same hash
ApiConfig delete dupplicates names for Config objects
so Config objects in ApiConfigs.configs have unique hash
"""
return hash(self.name) | PypiClean |
/ConfigSpace-nni-0.4.7.tar.gz/ConfigSpace-nni-0.4.7/ConfigSpace/nx/release.py |
from __future__ import absolute_import
import os
import sys
import time
import datetime
import subprocess
basedir = os.path.abspath(os.path.split(__file__)[0])
def get_revision():
"""Returns revision and vcs information, dynamically obtained."""
vcs, revision, tag = None, None, None
hgdir = os.path.join(basedir, '..', '.hg')
gitdir = os.path.join(basedir, '..', '.git')
if os.path.isdir(hgdir):
vcs = 'mercurial'
try:
p = subprocess.Popen(['hg', 'id'],
cwd=basedir,
stdout=subprocess.PIPE)
except OSError:
# Could not run hg, even though this is a mercurial repository.
pass
else:
stdout = p.communicate()[0]
# Force strings instead of unicode.
x = list(map(str, stdout.decode().strip().split()))
if len(x) == 0:
# Somehow stdout was empty. This can happen, for example,
# if you're running in a terminal which has redirected stdout.
# In this case, we do not use any revision/tag info.
pass
elif len(x) == 1:
# We don't have 'tip' or anything similar...so no tag.
revision = str(x[0])
else:
revision = str(x[0])
tag = str(x[1])
elif os.path.isdir(gitdir):
vcs = 'git'
# For now, we are not bothering with revision and tag.
vcs_info = (vcs, (revision, tag))
return revision, vcs_info
def get_info(dynamic=True):
## Date information
date_info = datetime.datetime.now()
date = time.asctime(date_info.timetuple())
revision, version, version_info, vcs_info = None, None, None, None
import_failed = False
dynamic_failed = False
if dynamic:
revision, vcs_info = get_revision()
if revision is None:
dynamic_failed = True
if dynamic_failed or not dynamic:
# This is where most final releases of NetworkX will be.
# All info should come from version.py. If it does not exist, then
# no vcs information will be provided.
sys.path.insert(0, basedir)
try:
from version import date, date_info, version, version_info, vcs_info
except ImportError:
import_failed = True
vcs_info = (None, (None, None))
else:
revision = vcs_info[1][0]
del sys.path[0]
if import_failed or (dynamic and not dynamic_failed):
# We are here if:
# we failed to determine static versioning info, or
# we successfully obtained dynamic revision info
version = ''.join([str(major), '.', str(minor)])
if dev:
version += '.dev_' + date_info.strftime("%Y%m%d%H%M%S")
version_info = (name, major, minor, revision)
return date, date_info, version, version_info, vcs_info
## Version information
name = 'networkx'
major = "1"
minor = "8.1"
## Declare current release as a development release.
## Change to False before tagging a release; then change back.
dev = False
description = "Python package for creating and manipulating graphs and networks"
long_description = \
"""
NetworkX is a Python package for the creation, manipulation, and
study of the structure, dynamics, and functions of complex networks.
"""
license = 'BSD'
authors = {'Hagberg' : ('Aric Hagberg','[email protected]'),
'Schult' : ('Dan Schult','[email protected]'),
'Swart' : ('Pieter Swart','[email protected]')
}
maintainer = "NetworkX Developers"
maintainer_email = "[email protected]"
url = 'http://networkx.lanl.gov/'
download_url="http://networkx.lanl.gov/download/networkx"
platforms = ['Linux', 'Mac OSX', 'Windows', 'Unix']
keywords = ['Networks', 'Graph Theory', 'Mathematics', 'network', 'graph', 'discrete mathematics', 'math']
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics']
date, date_info, version, version_info, vcs_info = get_info() | PypiClean |
/ESGmetric_from_PDF-0.1.tar.gz/ESGmetric_from_PDF-0.1/ESGmetric_from_PDF/Training_data_strings.py |
patterns = [ [{'TEXT':'KPMG'}],
[{'TEXT':'PricewaterhouseCoopers'}],
[{'TEXT':'Ernst'},{'IS_PUNCT':True},{'TEXT':'Young'}],
[{'TEXT':'EY'}],
[{'TEXT':'Deloitte'}],
[{'TEXT':'PwC'}]
]
NAME_TEXT = ["for and on behalf of KPMG LLP, Statutory Auditor",
"for and on behalf of PricewaterhouseCoopers LLP",
"for and on behalf of Ernst & Young LLP, Statutory Auditor",
"for and on behalf of Deloitte LLP",
"For and on behalf of Deloitte Ireland LLP",
"KPMG audit partner",
"The following fees were paid or are payable to the company’s auditors, KPMG LLP and other firms in the KPMG network",
"Following an external audit tender in 2015, KPMG was appointed as Barclays’ Statutory Auditor",
"KPMG was appointed by shareholders as the Group’s Statutory Auditor in 2016 following a formal tender process.",
"KPMG LLP (KPMG) has been the company’s External Auditor since",
"Confirmed the external auditor, PwC, remains independent",
"The external auditors are PwC who were first appointed for the financial year commencing 1 January 2016 following a competitive tender process.",
"External Auditors PricewaterhouseCoopers LLP",
"the relationship with the Group’s current audit firm KPMG LLP (KPMG)",
"The Group’s external auditor is KPMG LLP (KPMG)",
"PricewaterhouseCoopers LLP (PwC) was appointed as the Group’s external auditor ",
"Given the continuing effectiveness of PwC in their role as external auditor",
" it is in the best interests of shareholders for PwC to remain as external auditor for the following financial year",
"The Committee therefore recommended to the Board that a resolution to reappoint PwC as external auditor of the Company",
"Deloitte LLP (Deloitte) continues as the Group’s external auditor. ",
"PwC were appointed as the Group’s external auditor for the financial year ",
"KPMG has been the Group’s auditor since 2018",
"the external auditor, PricewaterhouseCoopers LLP,"
"The current external auditor, PricewaterhouseCoopers LLP, was appointed in 2007.",
"The Board appointed PricewaterhouseCoopers LLP (PwC) as external auditor in June 2014",
"During the period, PricewaterhouseCoopers LLP, the Group’s auditor, provided the following services",
"As regards external audits, the sustainability performance accounted in the Report is subject to limited audit by an independent firm (PricewaterhouseCoopers",
"The legal audit of the accounts is entrusted to PricewaterhouseCoopers S.p.A., an external auditing firm included in the register of accounting auditors. ",
"The firm appointed to undertake the external audit of the accounts of the Company is PricewaterhouseCoopers S.p.A. (the “ExternalAuditing Firm”),",
"The Audit Committee has now overseen the first year following a transition in the External Auditor, working with Ernst and Young LLP. ",
"This statement is subject to review by Deloitte, our external auditor.",
"The lead audit engagement partners for EY in Australia and the United Kingdom (together, ‘EY’) were appointed for commencement from 1 July 2019",
"the Board announced it had selected EY to be the Group’s auditor from the financial year",
"EY was the auditor during FY2020.",
"The 2020 financial year-end is KPMG LLP’s third financial reporting period as the Group’s external auditor,",
"During the year, Deloitte provided external audit services for regulatory and statutory reporting. ",
"The performance of the Company’s external auditor, KPMG LLP, is kept under review by the Board and the Committee.",
"KPMG was appointed as external auditor in July 2015",
"Deloitte LLP Statutory Audit",
"Deloitte LLP was originally appointed by the shareholders in 2018 as our external auditor",
"The Company engaged Deloitte & Touche LLP, an independent registered public accounting f irm, to audit and render an opinion on the consolidated f inancial statements",
"Deloitte was appointed as auditor of the company",
"the Committee has recommended to the Board that Deloitte be reappointed under the current external audit contract ",
"Deloitte’s audit of the Group financial statements for the year ended",
"shareholders approved the appointment of PricewaterhouseCoopers LLP (PwC) as the Company’s new Statutory Auditor.",
"to propose to shareholders the re-appointment of PwC as auditor ",
"in its capacity as Group external auditor, PwC undertakes an audit of the relevant sections",
"with its external auditors, PricewaterhouseCoopers LLP (PwC), ",
"A resolution will be proposed at the AGM on 30 April 2021 for the reappointment of PricewaterhouseCoopers LLP (PwC) as auditor of the Company. ",
"during 2020, PwC undertook various audit and audit related services.",
] | PypiClean |
/AlbertMarker-0.0.1.1.tar.gz/AlbertMarker-0.0.1.1/marker/utils.py | import torch
import os
import datetime
import unicodedata
import json
class InputFeatures(object):
def __init__(self, input_id, label_id, input_mask):
self.input_id = input_id
self.label_id = label_id
self.input_mask = input_mask
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def read_corpus(path, max_length, label_dic, vocab):
"""
:param path:数据文件路径
:param max_length: 最大长度
:param label_dic: 标签字典
:return:
"""
# file = open(path, encoding='utf-8')
# content = file.readlines()
# file.close()
result = []
# with open(path, 'r', encoding = 'utf-8') as f:
tjson=Tjson(file_path=path)
for line in tjson.load():
# text, label = line.strip().split('|||')
# tokens = text.split()
# label = label.split()
# print(line)
tokens=line['text']
label=line['label']
if len(tokens) > max_length-2:
tokens = tokens[0:(max_length-2)]
label = label[0:(max_length-2)]
tokens_f =['[CLS]'] + tokens + ['[SEP]']
label_f = ["<start>"] + label + ['<eos>']
input_ids = [int(vocab[i]) if i in vocab else int(vocab['[UNK]']) for i in tokens_f]
label_ids = [label_dic[i] for i in label_f]
input_mask = [1] * len(input_ids)
while len(input_ids) < max_length:
input_ids.append(0)
input_mask.append(0)
label_ids.append(label_dic['<pad>'])
assert len(input_ids) == max_length
assert len(input_mask) == max_length
assert len(label_ids) == max_length
feature = InputFeatures(input_id=input_ids, input_mask=input_mask, label_id=label_ids)
result.append(feature)
return result
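# Illustrative sketch (hypothetical file names): building training features
# from a JSON-lines corpus with a BERT/ALBERT vocabulary file.
#
#     vocab = load_vocab('vocab.txt')
#     label_dic = {'<pad>': 0, '<start>': 1, '<eos>': 2, 'O': 3, 'B-LOC': 4, 'I-LOC': 5}
#     features = read_corpus('train.json', max_length=128, label_dic=label_dic, vocab=vocab)
#     print(features[0].input_id[:10])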
class InputPreFeatures(object):
def __init__(self, input_id, input_mask):
self.input_id = input_id
# self.label_id = label_id
self.input_mask = input_mask
def build_input(content, max_length, vocab):
"""
:param content: list of input sentences
:param max_length: maximum sequence length
:param vocab: token-to-id vocabulary
:return: list of InputPreFeatures
"""
# file = open(path, encoding='utf-8')
# content = file.readlines()
# file.close()
result = []
for line in content:
# text, label = line.strip().split('|||')
tokens = line.split()
# label = label.split()
if len(tokens) > max_length-2:
tokens = tokens[0:(max_length-2)]
# label = label[0:(max_length-2)]
# print(tokens)
tokens_f =['[CLS]'] + tokens + ['[SEP]']
# print('tokens_f',tokens_f)
# label_f = ["<start>"] + label + ['<eos>']
input_ids = [int(vocab[i]) if i in vocab else int(vocab['[UNK]']) for i in tokens_f]
# label_ids = [label_dic[i] for i in label_f]
input_mask = [1] * len(input_ids)
while len(input_ids) < max_length:
input_ids.append(0)
input_mask.append(0)
# label_ids.append(label_dic['<pad>'])
assert len(input_ids) == max_length
assert len(input_mask) == max_length
# assert len(label_ids) == max_length
feature = InputPreFeatures(input_id=input_ids, input_mask=input_mask)
result.append(feature)
return result
def save_model(model, epoch, path='result', **kwargs):
"""
Keeps every saved model by default.
:param model: model to save
:param path: directory to save into
:param loss: validation loss
:param last_loss: loss of the best epoch
:param kwargs: every_epoch or best_epoch
:return:
"""
if not os.path.exists(path):
os.mkdir(path)
if kwargs.get('name', None) is None:
# cur_time = datetime.datetime.now().strftime('%Y-%m-%d#%H:%M:%S')
# name = cur_time + '--epoch:{}'.format(epoch)
name="pytorch_model.bin"
full_name = os.path.join(path, name)
torch.save(model.state_dict(), full_name)
# print('Saved model at epoch {} successfully'.format(epoch))
with open('{}/checkpoint'.format(path), 'w') as file:
file.write(name)
# print('Write to checkpoint')
def load_model(model, path='result', **kwargs):
if kwargs.get('name', None) is None:
with open('{}/checkpoint'.format(path)) as file:
content = file.read().strip()
name = os.path.join(path, content)
else:
name=kwargs['name']
# name = os.path.join(path,name)
name = os.path.join(name)
# print('name',name)
model.load_state_dict(torch.load(name, map_location=lambda storage, loc: storage))
# print('load model {} successfully'.format(name))
return model
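# Illustrative sketch (assumed training-loop context): persisting a checkpoint
# and restoring it later.
#
#     save_model(model, epoch, path='result')    # writes result/pytorch_model.bin and result/checkpoint
#     model = load_model(model, path='result')   # reloads the checkpoint recorded in result/checkpoint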
class Tjson:
"""
Helper for reading and writing JSON-lines files.
"""
def __init__(self,file_path="data.json"):
self.file_path=file_path
def save(self,data):
"""
Save data to the file,
writing one JSON object per line.
>>> data=[{'a':'ess'}]
"""
with open(self.file_path, 'a+', encoding='utf-8') as f:
for item in data:
line = json.dumps(item, ensure_ascii=False)
f.write(line+'\n')
def load(self):
"""
Load data, yielding one JSON object per line.
"""
with open(self.file_path, 'r', encoding = 'utf-8') as f:
for line in f:
data=json.loads(line[:-1])
yield data
"""
# Usage
data=[{'a':'ess'}]
Tjson().save(data)
print(list(Tjson().load()))
""" | PypiClean |
/Indomielibs-2.0.106.tar.gz/Indomielibs-2.0.106/pyrogram/connection/transport/tcp/tcp.py |
import asyncio
import ipaddress
import logging
import socket
from concurrent.futures import ThreadPoolExecutor
import socks
log = logging.getLogger(__name__)
class TCP:
TIMEOUT = 10
def __init__(self, ipv6: bool, proxy: dict):
self.socket = None
self.reader = None
self.writer = None
self.lock = asyncio.Lock()
self.loop = asyncio.get_event_loop()
self.proxy = proxy
if proxy:
hostname = proxy.get("hostname")
try:
ip_address = ipaddress.ip_address(hostname)
except ValueError:
self.socket = socks.socksocket(socket.AF_INET)
else:
if isinstance(ip_address, ipaddress.IPv6Address):
self.socket = socks.socksocket(socket.AF_INET6)
else:
self.socket = socks.socksocket(socket.AF_INET)
self.socket.set_proxy(
proxy_type=getattr(socks, proxy.get("scheme").upper()),
addr=hostname,
port=proxy.get("port", None),
username=proxy.get("username", None),
password=proxy.get("password", None)
)
self.socket.settimeout(TCP.TIMEOUT)
log.info("Using proxy %s", hostname)
else:
self.socket = socket.socket(
socket.AF_INET6 if ipv6
else socket.AF_INET
)
self.socket.setblocking(False)
async def connect(self, address: tuple):
if self.proxy:
with ThreadPoolExecutor(1) as executor:
await self.loop.run_in_executor(executor, self.socket.connect, address)
else:
try:
await asyncio.wait_for(asyncio.get_event_loop().sock_connect(self.socket, address), TCP.TIMEOUT)
except asyncio.TimeoutError: # Re-raise as TimeoutError. asyncio.TimeoutError is deprecated in 3.11
raise TimeoutError("Connection timed out")
self.reader, self.writer = await asyncio.open_connection(sock=self.socket)
async def close(self):
try:
if self.writer is not None:
self.writer.close()
await asyncio.wait_for(self.writer.wait_closed(), TCP.TIMEOUT)
except Exception as e:
log.info("Close exception: %s %s", type(e).__name__, e)
async def send(self, data: bytes):
async with self.lock:
try:
if self.writer is not None:
self.writer.write(data)
await self.writer.drain()
except Exception as e:
log.info("Send exception: %s %s", type(e).__name__, e)
raise OSError(e)
async def recv(self, length: int = 0):
data = b""
while len(data) < length:
try:
chunk = await asyncio.wait_for(
self.reader.read(length - len(data)),
TCP.TIMEOUT
)
except (OSError, asyncio.TimeoutError):
return None
else:
if chunk:
data += chunk
else:
return None
return data | PypiClean |
/Flask-Generic-Views-0.1.1.tar.gz/Flask-Generic-Views-0.1.1/flask_generic_views/core.py | from flask import Response, abort, redirect, render_template, request, url_for
from flask.views import MethodView as BaseMethodView
from flask.views import View as BaseView
from werkzeug.datastructures import CombinedMultiDict
from werkzeug.routing import BuildError
from werkzeug.urls import url_parse
from flask_generic_views._compat import iteritems
class View(BaseView):
""" The master class-based base view.
All other generic views inherit from this base class. This class itself
inherits from :class:`flask.views.View` and adds a generic constructor
that converts any keyword arguments to instance attributes.
.. code-block:: python
class GreetingView(View):
greeting = 'Hello'
def dispatch_request(self):
return "{} World!".format(self.greeting)
bonjour_view = GreetingView.as_view('bonjour', greeting='Bonjour')
app.add_url_rule('/bonjour', view_func=bonjour_view)
The above example shows a generic view that allows us to change the
greeting while setting up the URL rule.
"""
def __init__(self, **kwargs):
for k, v in iteritems(kwargs):
setattr(self, k, v)
class MethodView(BaseMethodView, View):
"""View class that routes to methods based on HTTP verb.
This view allows us to break down logic based on the HTTP verb used, and
avoid conditionals in our code.
.. code-block:: python
class GreetingView(MethodView):
greeting = 'Hello'
def get(self):
return "{} World!".format(self.greeting)
def post(self):
name = request.form.get('name', 'World')
return "{} {}!".format(self.greeting, name)
bonjour_view = GreetingView.as_view('bonjour', greeting='Bonjour')
app.add_url_rule('/bonjour', view_func=bonjour_view)
The above example will process the request differently depending on whether
it was an HTTP POST or GET.
"""
class ContextMixin(object):
"""Default handling of view context data any mixins that modifies the views
context data should inherit from this class.
.. code-block:: python
class RandomMixin(ContextMixin):
def get_context_data(self, **kwargs):
kwargs.setdefault('number', random.randrange(1, 100))
return super(RandomMixin, self).get_context_data(**kwargs)
"""
def get_context_data(self, **kwargs):
"""Returns a dictionary representing the view context. Any keyword
arguments provided will be included in the returned context.
The context of all class-based views will include a ``view`` variable
that points to the :class:`View` instance.
:param kwargs: context
:type kwargs: dict
:returns: context
:rtype: dict
"""
kwargs.setdefault('view', self)
return kwargs
class TemplateResponseMixin(object):
"""Creates :class:`~werkzeug.wrappers.Response` instances with a rendered
template based on the given context. The choice of template is configurable
and can be customised by subclasses.
.. code-block:: python
class RandomView(TemplateResponseMixin, MethodView):
template_name = 'random.html'
def get(self):
context = {'number': random.randrange(1, 100)}
return self.create_response(context)
random_view = RandomView.as_view('random')
app.add_url_rule('/random', view_func=random_view)
"""
template_name = None
response_class = Response
mimetype = None
def create_response(self, context=None, **kwargs):
"""Returns a :attr:`response_class` instance containing the rendered
template.
If any keyword arguments are provided, they will be passed to the
constructor of the response class.
:param context: context for template
:type context: dict
:param kwargs: response keyword arguments
:type kwargs: dict
:returns: response
:rtype: werkzeug.wrappers.Response
"""
kwargs.setdefault('mimetype', self.mimetype)
template_names = self.get_template_list()
response = render_template(template_names, **(context or {}))
return self.response_class(response, **kwargs)
def get_template_list(self):
"""Returns a list of template names to use for when rendering the
template.
The default implementation will return a list containing
:attr:`template_name`, when not specified a :exc:`NotImplementedError`
exception will be raised.
:returns: template list
:rtype: list
:raises NotImplementedError: when :attr:`template_name` is not set
"""
if self.template_name is None:
error = ("{0} requires either a definition of 'template_name' or "
"an implementation of 'get_template_list()'")
raise NotImplementedError(error.format(self.__class__.__name__))
return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, MethodView):
"""Renders a given template, with the context containing parameters
captured by the URL rule.
.. code-block:: python
class AboutView(TemplateView):
template_name = 'about.html'
def get_context_data(self, **kwargs):
kwargs['staff'] = ('John Smith', 'Jane Doe')
return super(AboutView, self).get_context_data(**kwargs)
app.add_url_rule('/about', view_func=AboutView.as_view('about'))
The :class:`~flask_generic_views.views.TemplateView` can be subclassed to
create custom views that render a template.
.. code-block:: python
about_view = TemplateView.as_view('about', template_name='about.html')
app.add_url_rule('/about', view_func=about_view, defaults={
'staff': ('John Smith', 'Jane Doe')
})
It can also be used directly in a URL rule to avoid having to create
additional classes.
"""
def get(self, **kwargs):
"""Handle request and return a template response.
Any keyword arguments will be passed to the views context.
:param kwargs: keyword arguments from url rule
:type kwargs: dict
:returns: response
:rtype: werkzeug.wrappers.Response
"""
context = self.get_context_data(**kwargs)
return self.create_response(context)
class RedirectView(View):
"""Redirects to a given URL.
The given URL may contain dictionary-style format fields which will be
interpolated against the keyword arguments captured from the URL rule
using the :meth:`~str.format` method.
An URL rule endpoint may be given instead, which will be passed to
:meth:`~flask.url_for` along with any keyword arguments captured by the
URL rule.
When no URL can be found a :exc:`~werkzeug.exceptions.Gone` exception
will be raised.
.. code-block:: python
class ShortView(RedirectView):
permanent = True
query_string = True
endpoint = 'post-detail'
def get_redirect_url(self, **kwargs):
post = Post.query.get_or_404(base62.decode(kwargs['code']))
kwargs['slug'] = post.slug
return super(ShortView, self).get_redirect_url(**kwargs)
short_view = ShortView.as_view('short')
app.add_url_rule('/s/<code>', view_func=short_view)
The above example will redirect "short links" where the pk is base62
encoded to the correct url.
.. code-block:: python
google_view = RedirectView.as_view('google', url='http://google.com/')
app.add_url_rule('/google', view_func=google_view)
It can also be used directly in a URL rule to avoid having to create
additional classes for simple redirects.
"""
url = None
endpoint = None
permanent = False
query_string = False
def get_redirect_url(self, **kwargs):
"""Retrieve URL to redirect to.
When :attr:`url` is not None then it is returned after being
interpolated with the keyword arguments using :meth:`~str.format`.
When :attr:`url` is None and :attr:`endpoint` is not None
then it is passed to :meth:`~flask.url_for` with the keyword arguments,
and any query string is removed.
The query string from the current request can be added to the new
URL by setting :attr:`query_string` to ``True``.
:param kwargs: keyword arguments
:type kwargs: dict
:returns: URL
:rtype: str
"""
if self.url is not None:
url = self.url.format(**kwargs)
elif self.endpoint is not None:
try:
url = url_for(self.endpoint, **kwargs)
except BuildError:
return None
else:
url = url_parse(url).replace(query='').to_url()
else:
return None
query = request.environ.get('QUERY_STRING', '')
if self.query_string and query:
url = url_parse(url).replace(query=query).to_url()
return url
def dispatch_request(self, **kwargs):
"""Redirect the user to the result of.
:meth:`~RedirectView.get_redirect_url`, when by default it will issue a
302 temporary redirect, except when :attr:`permanent` is
set to the ``True``, then a 301 permanent redirect will be used.
When the redirect URL is None, a :exc:`~werkzeug.exceptions.Gone`
exception will be raised.
Any keyword arguments will be used to build the URL.
:param kwargs: keyword arguments from url rule
:type kwargs: dict
:returns: response
:rtype: werkzeug.wrappers.Response
"""
url = self.get_redirect_url(**kwargs)
if url is None:
abort(410)
if self.permanent:
return redirect(url, code=301)
return redirect(url)
class FormMixin(ContextMixin):
"""Provides facilities for creating and displaying forms."""
data = {}
form_class = None
success_url = None
prefix = ''
def get_data(self):
"""Retrieve data to pass to the form.
By default returns a copy of :attr:`data`.
:returns: data
:rtype: dict
"""
return self.data.copy()
def get_prefix(self):
"""Retrieve prefix to pass to the form.
By default returns :attr:`prefix`.
:returns: prefix
:rtype: str
"""
return self.prefix
def get_formdata(self):
"""Retrieve prefix to pass to the form.
By default returns a :class:`werkzeug.datastructures.CombinedMultiDict`
containing :attr:`flask.request.form` and :attr:`flask.request.files`.
:returns: form / file data
:rtype: werkzeug.datastructures.CombinedMultiDict
"""
return CombinedMultiDict([request.form, request.files])
def get_form_kwargs(self):
"""Retrieve the keyword arguments required to instantiate the form.
The ``data`` argument is set using :meth:`get_data` and the ``prefix``
argument is set using :meth:`get_prefix`. When the request is a POST or
PUT, then the ``formdata`` argument will be set using
:meth:`get_formdata`.
:returns: keyword arguments
:rtype: dict
"""
kwargs = {'data': self.get_data(),
'prefix': self.get_prefix()}
if request.method in ('POST', 'PUT'):
kwargs['formdata'] = self.get_formdata()
return kwargs
def get_form_class(self):
"""Retrieve the form class to instantiate.
By default returns :attr:`form_class`.
:returns: form class
:rtype: type
:raises NotImplementedError: when :attr:`form_class` is not set
"""
if self.form_class is None:
error = ("{0} requires either a definition of 'form_class' or "
"an implementation of 'get_form_class()'")
raise NotImplementedError(error.format(self.__class__.__name__))
return self.form_class
def get_form(self):
"""Create a :class:`~flask_wtf.Form` instance using
:meth:`get_form_class` using :meth:`get_form_kwargs`.
:returns: form
:rtype: flask_wtf.Form
"""
cls = self.get_form_class()
return cls(**self.get_form_kwargs())
def get_success_url(self):
"""Retrive the URL to redirect to when the form is successfully
validated.
By default returns :attr:`success_url`.
:returns: URL
:rtype: str
:raises NotImplementedError: when :attr:`success_url` is not set
"""
if self.success_url is None:
error = ("{0} requires either a definition of 'success_url' or "
"an implementation of 'get_success_url()'")
raise NotImplementedError(error.format(self.__class__.__name__))
return self.success_url
def form_valid(self, form):
"""Redirects to :meth:`get_success_url`.
:param form: form instance
:type form: flask_wtf.Form
:returns: response
:rtype: werkzeug.wrappers.Response
"""
return redirect(self.get_success_url())
def form_invalid(self, form):
"""Creates a response using the return value of.
:meth:`get_context_data()`.
:param form: form instance
:type form: flask_wtf.Form
:returns: response
:rtype: werkzeug.wrappers.Response
"""
return self.create_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
"""Extends the view context with a ``form`` variable containing the
return value of :meth:`get_form`.
:param kwargs: context
:type kwargs: dict
:returns: context
:rtype: dict
"""
kwargs.setdefault('form', self.get_form())
return super(FormMixin, self).get_context_data(**kwargs)
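# A minimal sketch (an assumption for illustration, not part of the original
# module): subclasses commonly override get_success_url() or form_valid()
# instead of setting the static attributes. The model, form and endpoint
# names below are hypothetical:
#
#     class EditPostView(FormView):
#         form_class = PostForm
#         template_name = 'edit_post.html'
#
#         def form_valid(self, form):
#             post = Post(title=form.title.data)
#             db.session.add(post)
#             db.session.commit()
#             self.success_url = url_for('post-detail', pk=post.id)
#             return super(EditPostView, self).form_valid(form)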
class ProcessFormView(MethodView):
"""Provides basic HTTP GET and POST processing for forms.
This class cannot be used directly and should be used with a
suitable mixin.
"""
def get(self, **kwargs):
"""Creates a response using the return value of.
:meth:`get_context_data()`.
:param kwargs: keyword arguments from url rule
:type kwargs: dict
:returns: response
:rtype: werkzeug.wrappers.Response
"""
return self.create_response(self.get_context_data())
def post(self, **kwargs):
"""Constructs and validates a form.
When the form is valid :meth:`form_valid` is called, when the form
is invalid :meth:`form_invalid` is called.
:param kwargs: keyword arguments from url rule
:type kwargs: dict
:returns: response
:rtype: werkzeug.wrappers.Response
"""
form = self.get_form()
if form.validate():
return self.form_valid(form)
else:
return self.form_invalid(form)
def put(self, **kwargs):
"""Passes all keyword arguments to :meth:`post`.
:param kwargs: keyword arguments from url rule
:type kwargs: dict
:returns: response
:rtype: werkzeug.wrappers.Response
"""
return self.post(**kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""View class to process handle forms without response creation."""
class FormView(TemplateResponseMixin, BaseFormView):
""" View class to display a :class:`~flask_wtf.Form`. When invalid
it shows the form with validation errors, when valid it redirects to a
new URL.
.. code-block:: python
class ContactForm(Form):
email = StringField('Name', [required(), email()])
message = TextAreaField('Message', [required()])
class ContactView(FormView):
form_class = ContactForm
success_url = '/thanks'
template_name = 'contact.html'
def form_valid(self, form):
message = Message('Contact Form', body=form.message.data,
recipients=['[email protected]'],
sender=form.email.data)
mail.send(message)
return super(ContactView, self).form_valid(form)
The above example will render the template ``contact.html`` with an
instance of ``ContactForm`` in the context variable ``form``. When the user
submits the form with valid data an email will be sent and the user is
redirected to ``/thanks``; when the form is submitted with invalid data
``contact.html`` will be rendered again and the form will contain any
error messages.
""" | PypiClean |
# File: /Fo4doG_mess_client-0.0.2.tar.gz/Fo4doG_mess_client-0.0.2/client/common/metaclasses.py
import dis
class ServerMaker(type):
'''
Metaclass that checks that the resulting class contains no client-side
calls such as connect. It also verifies that the server socket
is a TCP socket working over IPv4.
'''
def __init__(cls, clsname, bases, clsdict):
# Methods used inside the class functions:
methods = []
# Attributes accessed by the class functions
attrs = []
for func in clsdict:
# Try to disassemble the attribute
try:
ret = dis.get_instructions(clsdict[func])
# Not a function: swallow the exception
except TypeError:
pass
else:
# It is a function: walk its bytecode to collect the methods and
# attributes it uses.
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
elif i.opname == 'LOAD_ATTR':
if i.argval not in attrs:
attrs.append(i.argval)
# If the forbidden connect method is used,
# raise an exception:
if 'connect' in methods:
raise TypeError(
'Using the connect method is not allowed in a server class')
# If the socket was not initialised with the SOCK_STREAM (TCP) and
# AF_INET (IPv4) constants, raise as well.
if not ('SOCK_STREAM' in attrs and 'AF_INET' in attrs):
raise TypeError('Incorrect socket initialisation.')
super().__init__(clsname, bases, clsdict)
class ClientMaker(type):
'''
Metaclass that checks that the resulting class contains no server-side
calls such as accept or listen. It also verifies that no socket is
created inside the class constructor.
'''
def __init__(cls, clsname, bases, clsdict):
# Methods used inside the class functions:
methods = []
for func in clsdict:
# Try to disassemble the attribute
try:
ret = dis.get_instructions(clsdict[func])
# Not a function: swallow the exception
except TypeError:
pass
else:
# It is a function: walk its bytecode to collect the methods it uses.
for i in ret:
if i.opname == 'LOAD_GLOBAL':
if i.argval not in methods:
methods.append(i.argval)
# If a forbidden method (accept, listen, socket) is used,
# raise an exception:
for command in ('accept', 'listen', 'socket'):
if command in methods:
raise TypeError(
'The class uses a forbidden method')
# Calls to get_message or send_message from utils are considered
# correct socket usage
if 'get_message' in methods or 'send_message' in methods:
pass
else:
raise TypeError(
'No calls to functions working with sockets were found.')
super().__init__(clsname, bases, clsdict)
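# Usage sketch (an assumption for illustration, not part of the original
# module): the metaclasses run at class-creation time, so a client class that
# calls a forbidden method such as accept() or listen() fails to be defined
# at all. The class below is hypothetical; send_message and get_message are
# assumed to be importable globals:
#
#     class MessengerClient(metaclass=ClientMaker):
#         def presence(self, sock):
#             send_message(sock, {'action': 'presence'})
#             return get_message(sock)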
# File: /Flask-LazyViews-0.6.tar.gz/Flask-LazyViews-0.6/flask_lazyviews/utils.py
from flask.views import View
from werkzeug.utils import cached_property, import_string
__all__ = ('LazyView', )
class LazyView(object):
"""
Import view function only when necessary.
"""
def __init__(self, name, *args, **kwargs):
"""
Initialize a ``LazyView`` instance for a view that will be imported from
the ``name`` path.
"""
self.import_name = name
self.args, self.kwargs = args, kwargs
self.__module__, self.__name__ = name.rsplit('.', 1)
def __call__(self, *args, **kwargs):
"""
Make real call to the view.
"""
if self.args or self.kwargs:
view = self.view(*self.args, **self.kwargs)
else:
view = self.view
return view(*args, **kwargs)
def __eq__(self, other):
"""
Check that two lazy view instances have equal import names.
"""
try:
return self.import_name == other.import_name
except (AttributeError, ImportError):
return False
def __ne__(self, other):
"""
Check that two lazy view instances have not equal import names.
"""
return not self.__eq__(other)
def __getattribute__(self, name):
"""
Proxy the documentation attribute from the original view if it can be
imported.
"""
try:
if name == '__doc__':
return self.view.__doc__
except ImportError:
pass
return super(LazyView, self).__getattribute__(name)
def __repr__(self):
"""
Show custom repr message if view function exists.
"""
try:
return repr(self.view)
except ImportError:
return super(LazyView, self).__repr__()
@cached_property
def view(self):
"""
Import view from string and cache it to current class instance.
"""
imported = import_string(self.import_name)
if isinstance(imported, type) and issubclass(imported, View):
view_name = self.import_name.lower().replace('view', '')
return imported.as_view(view_name)
return imported
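# Usage sketch (an assumption for illustration, not part of the original
# module): registering a rule with a LazyView defers importing the target
# module until the view is first called. The dotted path below is
# hypothetical:
#
#     app.add_url_rule('/', view_func=LazyView('myapp.views.index'))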
# File: /MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/integrations/handlers/mlflow_handler/mlflow_handler.py
import requests
from datetime import datetime
from typing import Dict, Optional
import pandas as pd
from mlflow.tracking import MlflowClient
from mindsdb.integrations.libs.base import BaseMLEngine
class MLflowHandler(BaseMLEngine):
"""
The MLflow integration engine needs a working connection to MLflow. For this:
- All models to be used should already be served
- An MLflow server should be running to access its model registry
Example:
1. Run `mlflow server -p 5001 --backend-store-uri sqlite:////path/to/mlflow.db --default-artifact-root ./artifacts --host 0.0.0.0`
2. Run `mlflow models serve --model-uri ./model_path`
3. Run MindsDB
Note: above, `artifacts` is a folder to store artifacts for new experiments that do not specify an artifact store.
""" # noqa
name = 'mlflow'
def create(self, target: str, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> None:
args = args['using'] # ignore the rest of the problem definition
connection = MlflowClient(args['mlflow_server_url'], args['mlflow_server_path'])
model_name = args['model_name']
mlflow_models = [model.name for model in connection.search_registered_models()]
if model_name not in mlflow_models:
raise Exception(f"Error: model '{model_name}' not found in mlflow. Check serving and try again.")
args['target'] = target
self._check_model_url(args['predict_url'])
self.model_storage.json_set('args', args)
def predict(self, df, args=None):
args = self.model_storage.json_get('args') # override any incoming args for now
self._check_model_url(args['predict_url'])
resp = requests.post(args['predict_url'],
data=df.to_json(orient='records'),
headers={'content-type': 'application/json; format=pandas-records'})
answer = resp.json()
predictions = pd.DataFrame({args['target']: answer})
return predictions
def describe(self, key: Optional[str] = None) -> pd.DataFrame:
if key == 'info':
args = self.model_storage.json_get('args')
connection = MlflowClient(args['mlflow_server_url'], args['mlflow_server_path'])
models = {model.name: model for model in connection.search_registered_models()}
model = models[args['model_name']]
latest_version = model.latest_versions[-1]
description = {
'NAME': [model.name],
'USER_DESCRIPTION': [model.description],
'LAST_STATUS': [latest_version.status],
'CREATED_AT': [datetime.fromtimestamp(model.creation_timestamp//1000).strftime("%m/%d/%Y, %H:%M:%S")],
'LAST_UPDATED': [datetime.fromtimestamp(model.last_updated_timestamp//1000).strftime("%m/%d/%Y, %H:%M:%S")],
'TAGS': [model.tags],
'LAST_RUN_ID': [latest_version.run_id],
'LAST_SOURCE_PATH': [latest_version.source],
'LAST_USER_ID': [latest_version.user_id],
'LAST_VERSION': [latest_version.version],
}
return pd.DataFrame.from_dict(description)
else:
tables = ['info']
return pd.DataFrame(tables, columns=['tables'])
@staticmethod
def _check_model_url(url):
""" try post without data, check status code not in (not_found, method_not_allowed) """
try:
resp = requests.post(url)
if resp.status_code in (404, 405):
raise Exception(f'Model url is incorrect, status_code: {resp.status_code}')
except requests.RequestException as e:
raise Exception(f'Model url is incorrect: {str(e)}')
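# Usage sketch (an assumption for illustration, not part of the original
# module): create() reads its settings from args['using'], so the caller is
# expected to supply at least the keys below; all values are hypothetical:
#
#     using = {
#         'model_name': 'my_served_model',
#         'mlflow_server_url': 'http://localhost:5001',
#         'mlflow_server_path': 'sqlite:////path/to/mlflow.db',
#         'predict_url': 'http://localhost:5000/invocations',
#     }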
# File: /dyv-0.5.1.tar.gz/dyv-0.5.1/dyv.py
import codecs
import os
import re
from os.path import expanduser
import click
import subprocess
import sys
import shutil
from pkg_resources import resource_filename
from configobj import ConfigObj
import jinja2
from prettytable import PrettyTable
from odooast import odooast
from lxml import etree
from operator import itemgetter
from dyools import Operator
from tree_format import format_tree
from dyools import Logger
__VERSION__ = '0.5.1'
__AUTHOR__ = ''
__WEBSITE__ = ''
__DATE__ = ''
ODOO = 'odoo'
MANIFEST_FILE = '__manifest__.py'
Log = Logger()
def ___set_odoo():
global ODOO
global MANIFEST_FILE
ODOO = 'odoo'
MANIFEST_FILE = '__manifest__.py'
def ___set_openerp():
global ODOO
global MANIFEST_FILE
ODOO = 'openerp'
MANIFEST_FILE = '__openerp__.py'
OPENERP_FILE = '__openerp__.py'
INIT_FILE = '__init__.py'
ADDON_README_FILE = 'README.rst'
PROJECT_README_FILE = 'README.md'
DESCRIPTION = 'description'
MANIFEST_TEMPLATE_FILE = '__manifest__.py'
ADDON_README_TEMPLATE_FILE = 'addon_readme.rst'
PROJECT_README_TEMPLATE_FILE = 'project_readme.md'
ADDON_TEMPLATES_TEMPLATE_FILE = 'templates.xml'
MODELS, VIEWS, WIZARD, CONTROLLERS, SECURITY, DATA, I18N, TESTS, REPORT = 'models', 'views', 'wizard', 'controllers', 'security', 'data', 'i18n', 'tests', 'report'
STATIC, SRC, JS, CSS, XML = 'static', 'src', 'js', 'css', 'xml'
SECURITY_FILE = 'ir.model.access.csv'
CONTROLLER_MAIN_FILE = 'main.py'
home = expanduser("~")
home = os.path.join(home, '.dyvz')
USERS_FILE = os.path.join(home, 'dyv_users.ini')
ADDONS_FILE = os.path.join(home, 'dyv_addons.ini')
PROJECTS_FILE = os.path.join(home, 'dyv_projects.ini')
ASSETS_FILE = os.path.join(home, 'dyv_assets.ini')
REPOS_FILE = os.path.join(home, 'dyv_repos.ini')
CHEMINS_FILE = os.path.join(home, 'dyv_chemins.ini')
def render(tpl_path, context):
resource_path = os.path.sep.join(['dyv', tpl_path])
tpl_path = resource_filename(__name__, resource_path)
path, filename = os.path.split(tpl_path)
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path)
).get_template(filename).render(context)
try:
os.makedirs(home)
except:
pass
if not os.path.exists(USERS_FILE):
with codecs.open(USERS_FILE, mode='w+', encoding='utf-8') as config_file:
pass
if not os.path.exists(ADDONS_FILE):
with codecs.open(ADDONS_FILE, mode='w+', encoding='utf-8') as config_file:
pass
if not os.path.exists(PROJECTS_FILE):
with codecs.open(PROJECTS_FILE, mode='w+', encoding='utf-8') as config_file:
pass
if not os.path.exists(ASSETS_FILE):
with codecs.open(ASSETS_FILE, mode='w+', encoding='utf-8') as config_file:
pass
def l(tmp_v, tmp_len=30):
suffix = ' ***' if len(tmp_v) > tmp_len else ''
return str(tmp_v)[:tmp_len] + suffix if tmp_v else ''
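# Example (derived from the code above): l('abcdefgh', 5) returns 'abcde ***'
# because the value is longer than 5 characters, while l('', 5) returns ''.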
@click.group()
@click.option('--user', '-u', type=click.STRING, help="The user to load")
@click.option('--addon', '-a', type=click.STRING, help="The addon to load")
@click.option('--project', '-p', type=click.STRING, help="The project to load")
@click.option('--odoo-version', '-ov', type=click.INT, default=False, help="The version of Odoo")
@click.option('--no-addon', '-na', type=click.BOOL, default=False, is_flag=True, help="No addon by default")
@click.version_option(__VERSION__, expose_value=False, is_eager=True, help="Show the version")
@click.pass_context
def cli(ctx, user, addon, project, odoo_version, no_addon):
"""CLI for DYV"""
version = odoo_version
if version and version <= 8:
___set_openerp()
else:
___set_odoo()
user_keys = ['name', 'email', 'company', 'website']
addon_keys = ['slug', 'name', 'version', 'summary', 'category', 'description', 'depends', 'icon']
project_keys = ['name', 'slug', 'description', 'year', 'path']
asset_keys = ['path', 'rename']
chemin_keys = ['path']
repo_keys = ['path', 'branch_8', 'branch_9', 'branch_10', 'branch_11', 'branch_12', 'branch_dev', 'branch_master']
config_user_obj = ConfigObj(USERS_FILE, encoding='utf-8')
config_addon_obj = ConfigObj(ADDONS_FILE, encoding='utf-8')
config_project_obj = ConfigObj(PROJECTS_FILE, encoding='utf-8')
config_asset_obj = ConfigObj(ASSETS_FILE, encoding='utf-8')
config_repo_obj = ConfigObj(REPOS_FILE, encoding='utf-8')
config_chemin_obj = ConfigObj(CHEMINS_FILE, encoding='utf-8')
ctx.obj['config_user_obj'] = config_user_obj
ctx.obj['config_addon_obj'] = config_addon_obj
ctx.obj['config_project_obj'] = config_project_obj
ctx.obj['config_asset_obj'] = config_asset_obj
ctx.obj['config_repo_obj'] = config_repo_obj
ctx.obj['config_chemin_obj'] = config_chemin_obj
assets = {}
for asset_section in config_asset_obj.sections:
assets[asset_section] = {
'rename': config_asset_obj[asset_section].get('rename', ''),
'path': config_asset_obj[asset_section].get('path', ''),
}
repos = {}
for repo_section in config_repo_obj.sections:
repos[repo_section] = {
'path': unicode(config_repo_obj[repo_section].get('path', '')).strip(),
'branch_8': unicode(config_repo_obj[repo_section].get('branch_8', '')).strip(),
'branch_9': unicode(config_repo_obj[repo_section].get('branch_9', '')).strip(),
'branch_10': unicode(config_repo_obj[repo_section].get('branch_10', '')).strip(),
'branch_11': unicode(config_repo_obj[repo_section].get('branch_11', '')).strip(),
'branch_12': unicode(config_repo_obj[repo_section].get('branch_12', '')).strip(),
'branch_dev': unicode(config_repo_obj[repo_section].get('branch_dev', '')).strip(),
'branch_master': unicode(config_repo_obj[repo_section].get('branch_master', '')).strip(),
}
chemins = {}
for chemin_section in config_chemin_obj.sections:
chemins[chemin_section] = {
'path': config_chemin_obj[chemin_section].get('path', ''),
}
ctx.obj['assets'] = assets
ctx.obj['repos'] = repos
ctx.obj['chemins'] = chemins
ctx.obj['no_addon'] = no_addon
if user:
if user not in config_user_obj.sections:
click.secho('The user %s not found' % user, fg='red')
sys.exit(-1)
else:
for k in user_keys:
ctx.obj['user_%s' % k] = config_user_obj.get(user, k, '')
if addon:
if addon not in config_addon_obj.sections:
click.secho('The addon %s not found' % addon, fg='red')
sys.exit(-1)
else:
for k in addon_keys:
ctx.obj['addon_%s' % k] = config_addon_obj.get(addon, k, '')
if project:
if project not in config_project_obj.sections:
click.secho('The project %s not found' % project, fg='red')
sys.exit(-1)
else:
for k in project_keys:
ctx.obj['project_%s' % k] = config_project_obj.get(project, k, '')
if not user:
for _sec in config_user_obj.sections:
default = 'default' in config_user_obj[_sec].keys() and config_user_obj.get(_sec).as_bool(
'default') or False
if default:
user = _sec
if not addon:
for _sec in config_addon_obj.sections:
default = 'default' in config_addon_obj[_sec].keys() and config_addon_obj.get(_sec).as_bool(
'default') or False
if default:
addon = _sec
if not project:
for _sec in config_project_obj.sections:
default = 'default' in config_project_obj[_sec].keys() and config_project_obj.get(_sec).as_bool(
'default') or False
if default:
project = _sec
ctx.obj['user'] = user
ctx.obj['addon'] = addon
ctx.obj['project'] = project
ctx.obj['user_keys'] = user_keys
ctx.obj['addon_keys'] = addon_keys
ctx.obj['project_keys'] = project_keys
ctx.obj['asset_keys'] = asset_keys
ctx.obj['repo_keys'] = repo_keys
ctx.obj['chemin_keys'] = chemin_keys
ctx.obj['asset'] = False # FIXME _
ctx.obj['odoo_version'] = version
if user:
click.secho('Use the user %s as default' % user, fg='green')
if addon:
click.secho('Use the addon %s as default' % addon, fg='green')
if project:
click.secho('Use the project %s as default' % project, fg='green')
ctx.obj['items'] = ['user', 'addon', 'project', 'asset']
def check(*elements):
return all([ctx.obj.get(__i, False) for __i in elements])
ctx.obj['check'] = check
def make_this_default(__config, __section):
for tmp_section in __config.sections:
if tmp_section == __section:
__config[tmp_section]['default'] = True
else:
__config[tmp_section]['default'] = False
def __get_items(key):
if key == 'user':
return 'user', USERS_FILE
elif key == 'addon':
return 'addon', ADDONS_FILE
elif key == 'project':
return 'project', PROJECTS_FILE
elif key == 'asset':
return 'asset', ASSETS_FILE
elif key == 'repo':
return 'repo', REPOS_FILE
elif key == 'chemin':
return 'chemin', CHEMINS_FILE
def __create_item(ctx, item_name, item_value):
key, config_path = __get_items(item_name)
config = ctx.obj['config_%s_obj' % key]
keys = ctx.obj['%s_keys' % key]
click.echo('Create new %s %s to the config %s' % (key, item_value, config_path))
if item_value not in config.sections:
config[item_value] = {}
else:
click.secho('The %s %s already exists' % (key, item_value), fg='red')
return
for k in keys:
default = ctx.obj.get('%s_%s' % (key, k), '')
tmp = click.prompt(k, default=default, type=str)
config[item_value][k] = tmp
make_this_default(config, item_value)
config.write()
click.secho('The %s %s is created' % (key, item_value), fg='green')
def __update_item(ctx, item_name, item_value):
key, config_path = __get_items(item_name)
section = ctx.obj.get('%s' % item_name, False)
item_value = item_value or section
config = ctx.obj['config_%s_obj' % key]
keys = ctx.obj['%s_keys' % key]
click.echo('Update %s %s from the config %s' % (key, item_value, config_path))
if item_value not in config.sections:
click.secho('The %s %s not found.' % (key, item_value), fg='red')
return
for k in keys:
default = config[item_value].get(k, '')
tmp = click.prompt(k, default=default, type=str)
config[item_value][k] = tmp
make_this_default(config, item_value)
config.write()
click.secho('The %s %s is updated' % (key, item_value), fg='green')
def __use_section(ctx, item_name, item_value):
if not item_value:
item_value = find_or_create_section_for(ctx, item_name)
key, config_path = __get_items(item_name)
config = ctx.obj['config_%s_obj' % key]
click.echo('Set %s %s as default in the config %s' % (key, item_value, config_path))
if item_value not in config.sections:
click.secho('The %s %s not found.' % (key, item_value), fg='red')
return
make_this_default(config, item_value)
config.write()
click.secho('The %s %s will be used as default' % (key, item_value), fg='green')
def __delete_section(ctx, item_name, item_values):
key, config_path = __get_items(item_name)
config = ctx.obj['config_%s_obj' % key]
click.echo('Delete %s %s from the config %s' % (key, item_values, config_path))
for item_value in item_values:
if item_value not in config.sections:
click.secho('The %s %s not found.' % (key, item_value), fg='red')
else:
del config[item_value]
click.secho('The %s %s is removed' % (key, item_value), fg='green')
with codecs.open(config_path, mode='wb', encoding='utf-8') as configfile:
config.write(configfile)
def __list_section(ctx, item_name):
key, config_path = __get_items(item_name)
config = ctx.obj['config_%s_obj' % key]
keys = ctx.obj['%s_keys' % key]
click.echo('List %ss from the config %s' % (key, config_path))
x = PrettyTable()
x.field_names = [item_name.title()] + [k.title() for k in keys] + ['Default']
for f in x.field_names:
x.align[f] = 'l'
for section in config.sections:
data = [config[section].get(k, '') for k in keys]
x.add_row([section] + data + [config[section].get('default', '')])
click.echo(x)
def __get_all_keys(ctx, additional_keys={}):
all_keys = {}
for item in ctx.obj['items']:
section = ctx.obj[item]
if section:
key, config_path = __get_items(item)
config = ctx.obj['config_%s_obj' % key]
keys = ctx.obj['%s_keys' % key]
if section not in config.sections:
click.secho('The %s %s not found.' % (item, section), fg='red')
continue
for k in keys:
all_keys['%s_%s' % (item, k)] = config[section].get(k, '')
all_keys.update(additional_keys)
all_keys['odoo_version'] = ctx.obj['odoo_version']
all_keys['assets'] = ctx.obj['assets']
all_keys['repos'] = ctx.obj['repos']
all_keys['chemins'] = ctx.obj['chemins']
all_keys['addon_name_len'] = len(all_keys.get('addon_name', ''))
all_keys['project_name_len'] = len(all_keys.get('project_name', ''))
all_keys['addon_depends'] = [x.strip().lower() for x in
all_keys.get('addon_depends', '').replace(',', ':').replace(';', ':').replace(' ',
':').split(
':') if x]
return all_keys
def __fix_keys(ctx):
for item in ctx.obj['items']:
key, config_path = __get_items(item)
config = ctx.obj['config_%s_obj' % key]
keys = ctx.obj['%s_keys' % key]
for section in config.sections:
for _k in keys:
if _k not in config[section].keys():
config.set(section, _k, '')
with codecs.open(config_path, mode='wb', encoding='utf-8') as configfile:
config.write(configfile)
def find_or_create_section_for(ctx, item_name):
current_path = os.getcwd()
key, config_path = __get_items(item_name)
config = ctx.obj['config_%s_obj' % key]
keys = ctx.obj['%s_keys' % key]
for section in config.sections:
if current_path == config[section].get('path', ''):
make_this_default(config, section)
return section
section = click.prompt('Give a name for the item')
config[section] = {}
for k in keys:
config[section][k] = ''
config[section]['path'] = current_path
make_this_default(config, section)
config.write()
return section
@cli.command()
@click.argument('user', type=click.STRING, required=True)
@click.pass_context
def user_create(ctx, user):
"""Create a new user"""
__create_item(ctx, 'user', user)
@cli.command()
@click.argument('addon', type=click.STRING, required=True)
@click.pass_context
def addon_create(ctx, addon):
"""Create a new addon"""
__create_item(ctx, 'addon', addon)
@cli.command()
@click.argument('project', type=click.STRING, required=True)
@click.pass_context
def project_create(ctx, project):
"""Create a new project"""
__create_item(ctx, 'project', project)
@cli.command()
@click.argument('asset', type=click.STRING, required=True)
@click.pass_context
def asset_create(ctx, asset):
"""Create a new asset"""
__create_item(ctx, 'asset', asset)
@cli.command()
@click.argument('chemin', type=click.STRING, required=True)
@click.pass_context
def path_create(ctx, chemin):
"""Create a new chemin"""
__create_item(ctx, 'chemin', chemin)
@cli.command()
@click.argument('repo', type=click.STRING, required=True)
@click.pass_context
def repo_create(ctx, repo):
"""Create a new repo"""
__create_item(ctx, 'repo', repo)
@cli.command()
@click.argument('user', type=click.STRING, required=False)
@click.pass_context
def user_update(ctx, user):
"""Update a user"""
__update_item(ctx, 'user', user)
@cli.command()
@click.argument('addon', type=click.STRING, required=False)
@click.pass_context
def addon_update(ctx, addon):
"""Update an addon"""
__update_item(ctx, 'addon', addon)
@cli.command()
@click.argument('project', type=click.STRING, required=False)
@click.pass_context
def project_update(ctx, project):
"""Update a project"""
__update_item(ctx, 'project', project)
@cli.command()
@click.argument('asset', type=click.STRING, required=False)
@click.pass_context
def asset_update(ctx, asset):
"""Update an asset"""
__update_item(ctx, 'asset', asset)
@cli.command()
@click.argument('repo', type=click.STRING, required=False)
@click.pass_context
def repo_update(ctx, repo):
"""Update a repo"""
__update_item(ctx, 'repo', repo)
@cli.command()
@click.argument('chemin', type=click.STRING, required=False)
@click.pass_context
def path_update(ctx, chemin):
"""Update a path"""
__update_item(ctx, 'chemin', chemin)
@cli.command()
@click.argument('user', type=click.STRING, required=True)
@click.pass_context
def user_use(ctx, user):
"""Use a user a default"""
__use_section(ctx, 'user', user)
@cli.command()
@click.argument('project', type=click.STRING, required=False)
@click.pass_context
def project_use(ctx, project):
"""Use a project as default"""
__use_section(ctx, 'project', project)
@cli.command()
@click.argument('asset', type=click.STRING, required=False)
@click.pass_context
def asset_use(ctx, asset):
"""Use an asset as default"""
__use_section(ctx, 'asset', asset)
@cli.command()
@click.argument('addon', type=click.STRING, required=True)
@click.pass_context
def addon_use(ctx, addon):
"""Use an addon as default"""
__use_section(ctx, 'addon', addon)
@cli.command()
@click.argument('repo', type=click.STRING, required=True)
@click.pass_context
def repo_use(ctx, repo):
"""Use a repo as default"""
__use_section(ctx, 'repo', repo)
@cli.command()
@click.argument('chemin', type=click.STRING, required=False)
@click.pass_context
def path_use(ctx, chemin):
"""Use a path as default"""
__use_section(ctx, 'chemin', chemin)
@cli.command()
@click.argument('user', type=click.STRING, required=True, nargs=-1)
@click.pass_context
def user_delete(ctx, user):
"""Delete a user"""
__delete_section(ctx, 'user', user)
@cli.command()
@click.argument('addon', type=click.STRING, required=True, nargs=-1)
@click.pass_context
def addon_delete(ctx, addon):
"""Delete an addon"""
__delete_section(ctx, 'addon', addon)
@cli.command()
@click.argument('project', type=click.STRING, required=True, nargs=-1)
@click.pass_context
def project_delete(ctx, project):
"""Delete an project"""
__delete_section(ctx, 'project', project)
@cli.command()
@click.argument('asset', type=click.STRING, required=True, nargs=-1)
@click.pass_context
def asset_delete(ctx, asset):
"""Delete an asset"""
__delete_section(ctx, 'asset', asset)
@cli.command()
@click.argument('chemin', type=click.STRING, required=True, nargs=-1)
@click.pass_context
def path_delete(ctx, chemin):
"""Delete a path"""
__delete_section(ctx, 'chemin', chemin)
@cli.command()
@click.argument('repo', type=click.STRING, required=True, nargs=-1)
@click.pass_context
def repo_delete(ctx, repo):
"""Delete a repo"""
__delete_section(ctx, 'repo', repo)
@cli.command()
@click.pass_context
def users(ctx):
"""Show users"""
__list_section(ctx, 'user')
@cli.command()
@click.pass_context
def addons(ctx):
"""Show addons"""
__list_section(ctx, 'addon')
@cli.command()
@click.pass_context
def projects(ctx):
"""Show projects"""
__list_section(ctx, 'project')
@cli.command()
@click.pass_context
def assets(ctx):
"""Show assets"""
__list_section(ctx, 'asset')
@cli.command()
@click.pass_context
def repos(ctx):
"""Show repos"""
__list_section(ctx, 'repo')
@cli.command()
@click.pass_context
def paths(ctx):
"""Show paths"""
__list_section(ctx, 'chemin')
@cli.command()
@click.pass_context
def table(ctx):
"""Show the table"""
__list_section(ctx, 'project')
__list_section(ctx, 'user')
__list_section(ctx, 'addon')
__list_section(ctx, 'asset')
__list_section(ctx, 'repo')
__list_section(ctx, 'chemin')
@cli.command()
@click.pass_context
def keys(ctx):
"""Show the keys"""
all_keys = __get_all_keys(ctx)
x = PrettyTable()
x.field_names = ['Key', 'Value']
for f in x.field_names:
x.align[f] = 'l'
keys = sorted(all_keys.keys())
for k in filter(lambda s: s not in ['asset', 'repo', 'path'], keys):
x.add_row([k, all_keys.get(k)])
click.echo(x)
@cli.command()
@click.pass_context
def fix_keys(ctx):
"""Fix the keys"""
__fix_keys(ctx)
click.secho('Keys are fixed', fg='green')
# *************** GENERATING DATA ***************#
def hash_model(m, prefix='', suffix='', ext=''):
m = m.strip().lower()
tmp = ''
for x in m:
tmp += x if x.isalnum() or x == '_' else '.'
model_dot = tmp
model_underscore = model_dot.replace('.', '_')
model_class = tmp.replace('.', '_').title().replace('_', '')
model_filename_tab = []
for part in model_dot.split('.')[::-1]:
if part not in model_filename_tab:
model_filename_tab.append(part)
model_filename = '_'.join(model_filename_tab[::-1]) if model_filename_tab else model_underscore
if model_filename.startswith('ir_') and len(model_filename) > 3:
model_filename = model_filename[3:]
if model_filename.startswith('hr_') and len(model_filename) > 3:
model_filename = model_filename[3:]
if model_filename.startswith('res_') and len(model_filename) > 3:
model_filename = model_filename[4:]
if ext:
ext = not ext.startswith('.') and ('.' + ext) or ext
return model_dot, model_underscore, model_class, prefix + model_filename + suffix, prefix + model_filename + suffix + ext
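# Worked example (derived from the code above):
#     hash_model('res.partner', prefix='test_', ext='py')
# returns
#     ('res.partner', 'res_partner', 'ResPartner', 'test_partner', 'test_partner.py')
# because the leading 'res_' is stripped from the file name before the
# prefix, suffix and extension are applied.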
def check_url_and_is_dir(url):
if not url:
return False, 'Please provide an URL %s' % url
if not os.path.exists(url):
return False, 'Url %s not found' % url
if not os.path.isdir(url):
return False, 'Url %s is not a directory' % url
return True, os.path.abspath(url)
def check_url_and_is_file(url):
if not url:
return False, 'Please provide an URL %s' % url
if not os.path.isfile(url):
return False, 'Url %s is not a file' % url
return True, os.path.abspath(url)
def check_url_and_is_addon(url):
if not url:
return False, 'Please provide an URL %s' % url
if not os.path.isdir(url):
return False, 'Url %s is not a directory' % url
path_manifest = os.path.sep.join([url, MANIFEST_FILE])
path_openerp = os.path.sep.join([url, OPENERP_FILE])
if os.path.isfile(path_manifest):
return True, path_manifest
if os.path.isfile(path_openerp):
return True, path_openerp
return False, 'The directory %s is not an addon' % url
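# These three helpers share one convention (derived from the code above):
# they return (True, payload) on success, where payload is the absolute
# directory path, the absolute file path or the manifest path, and
# (False, error_message) on failure, e.g., on a system where /tmp exists:
#     check_url_and_is_dir('/tmp') -> (True, '/tmp')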
def fix_addon_version(_ap, all_keys):
_ap_openerp_full = os.path.join(_ap, '__openerp__.py')
_ap_odoo_full = os.path.join(_ap, '__manifest__.py')
odoo_version = all_keys.get('odoo_version', False)
if os.path.isfile(_ap_openerp_full) and (not odoo_version or odoo_version > 8 or odoo_version == 0):
___set_openerp()
all_keys['odoo_version'] = 8
if os.path.isfile(_ap_odoo_full) and (not odoo_version or odoo_version < 9):
___set_odoo()
all_keys['odoo_version'] = 10
return all_keys
def go_and_patch_addon(project_path, addon_slug, all_keys, depends=None, **kwargs):
def _get_templates_attrs(_type, frontend=False, backend=False):
_type = _type.lower().strip()
_tag, _attr, _id, _inherit = 'link', {'rel': 'stylesheet', 'href': None}, 'assets_backend', 'web.assets_backend'
if frontend:
_id = 'assets_frontend'
_inherit = 'website.assets_frontend'
js = _type.startswith('js') and True or False
if js:
_tag, _attr = 'script', {'type': 'text/javascript', 'src': None}
return _tag, _attr, _id, _inherit
def fix_template_asset(_addon_path, _path, _type, frontend=False, backend=False):
_tag, _attr, _id, _inherit = _get_templates_attrs(_type, frontend, backend)
create_dir([_addon_path, VIEWS])
templates_path = os.path.join(_addon_path, VIEWS, 'templates.xml')
asset_file = '/' + _path
create_file([_addon_path, VIEWS, 'templates.xml'], add_content='<?xml version="1.0" encoding="UTF-8"?>',
condition='<?xml', test_on_plat=True)
create_file([_addon_path, VIEWS, 'templates.xml'], add_content='<%s>' % ODOO, test_on_plat=True)
# create_file([_addon_path, VIEWS, 'templates.xml'], add_content=' <data>')
# create_file([_addon_path, VIEWS, 'templates.xml'], add_content=' </data>')
create_file([_addon_path, VIEWS, 'templates.xml'], add_content='</%s>' % ODOO, test_on_plat=True)
add_to_manifest([addon_path, MANIFEST_FILE], [VIEWS, 'templates.xml'])
template_node = etree.parse(templates_path)
root = template_node.getroot()
data = root.find('data')
if data is None:
data = root
template_node = data.find("template[@id='%s']" % _id)
_attr_key = 'unknown'
for _k, _v in _attr.iteritems():
if not _v:
_attr_key = _k
_attr[_k] = asset_file
if template_node is None:
template_node = etree.SubElement(data, 'template', id=_id, inherit_id=_inherit,
name=all_keys.get('addon_slug', 'Name'))
xpath = template_node.find('xpath')
if xpath is None:
xpath = etree.SubElement(template_node, 'xpath', expr='.', position='inside')
file_node = xpath.find('%s[@%s="%s"]' % (_tag, _attr_key, asset_file))
if file_node is None:
etree.SubElement(xpath, _tag, **_attr)
contents = etree.tostring(root, encoding='utf8', xml_declaration=True, pretty_print=True)
create_file([_addon_path, VIEWS, 'templates.xml'], contents=contents)
assets = all_keys.get('assets', {})
click.echo('Patch the addon %s ...' % addon_slug)
click.echo('args : %s' % kwargs)
addon_path = os.path.join(project_path, addon_slug)
if depends:
depends = __clean_depends(depends)
all_keys['addon_depends'] = depends
if not os.path.exists(addon_path):
os.mkdir(addon_path)
fix_addon_version(addon_path, all_keys)
manifest_path = os.path.join(addon_path, MANIFEST_FILE)
root_init_path = os.path.join(addon_path, INIT_FILE)
readme_path = os.path.join(addon_path, ADDON_README_FILE)
if not os.path.isfile(manifest_path):
with codecs.open(manifest_path, encoding='utf-8', mode='w+') as manifest_file:
manifest_file.write(render(MANIFEST_TEMPLATE_FILE, all_keys))
if not os.path.isfile(root_init_path):
with codecs.open(root_init_path, mode='w+', encoding='utf-8') as manifest_file:
manifest_file.write('')
if kwargs.get('readme', []):
readme = kwargs.get('readme', [])
if readme:
if not os.path.isfile(readme_path):
with codecs.open(readme_path, mode='w+', encoding='utf-8') as addon_readme_file:
addon_readme_file.write(render(ADDON_README_TEMPLATE_FILE, all_keys))
if kwargs.get('xml', []):
xml = kwargs.get('xml', [])
if xml:
create_dir([addon_path, STATIC, SRC, XML])
if hasattr(xml, '__iter__'):
for model_class in xml:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(
model_class, prefix='', ext='xml')
xml_file = '/'.join([all_keys.get('addon_slug'), STATIC, SRC, XML, model_filename_ext])
create_file([xml_file], add_content='<?xml version="1.0" encoding="UTF-8"?>', condition='<?xml',
test_on_plat=True)
create_file([xml_file], add_content='<templates xml:space="preserve">', condition='templates',
test_on_plat=True)
create_file([xml_file], add_content='</templates>', condition='templates', test_on_plat=True)
add_to_manifest([addon_path, MANIFEST_FILE], [STATIC, SRC, XML, model_filename_ext], key='qweb')
if kwargs.get('css_backend', []):
css_backend = kwargs.get('css_backend', [])
if css_backend:
create_dir([addon_path, STATIC, SRC, CSS])
if hasattr(css_backend, '__iter__'):
for model_class in css_backend:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(
model_class, prefix='', ext='css')
css_file = '/'.join([all_keys.get('addon_slug'), STATIC, SRC, CSS, model_filename_ext])
create_file([css_file], add_content='')
fix_template_asset(addon_path, css_file, 'css', backend=True)
if kwargs.get('css_frontend', []):
css_frontend = kwargs.get('css_frontend', [])
if css_frontend:
create_dir([addon_path, STATIC, SRC, CSS])
if hasattr(css_frontend, '__iter__'):
for model_class in css_frontend:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(
model_class, prefix='', ext='css')
css_file = '/'.join([all_keys.get('addon_slug'), STATIC, SRC, CSS, model_filename_ext])
create_file([css_file], add_content='')
fix_template_asset(addon_path, css_file, 'css', frontend=True)
if kwargs.get('js_backend', []):
js_backend = kwargs.get('js_backend', [])
if js_backend:
create_dir([addon_path, STATIC, SRC, JS])
if hasattr(js_backend, '__iter__'):
for model_class in js_backend:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(
model_class, prefix='', ext='js')
js_file = '/'.join([all_keys.get('addon_slug'), STATIC, SRC, JS, model_filename_ext])
create_file([js_file], add_content='odoo.define(\'%s.%s\', function(require){' % (
all_keys.get('addon_slug', 'addon'), model_filename))
create_file([js_file], add_content='"use strict";')
create_file([js_file], add_content='});')
fix_template_asset(addon_path, js_file, 'js', backend=True)
if kwargs.get('js_frontend', []):
js_frontend = kwargs.get('js_frontend', [])
if js_frontend:
create_dir([addon_path, STATIC, SRC, JS])
if hasattr(js_frontend, '__iter__'):
for model_class in js_frontend:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(
model_class, prefix='', ext='js')
js_file = '/'.join([all_keys.get('addon_slug'), STATIC, SRC, JS, model_filename_ext])
create_file([js_file], add_content='')
fix_template_asset(addon_path, js_file, 'js', frontend=True)
if kwargs.get('icon', []):
icon = kwargs.get('icon', [])
if icon:
create_dir([addon_path, STATIC, DESCRIPTION])
if hasattr(icon, '__iter__'):
for ico in icon:
destination = os.path.join(addon_path, STATIC, DESCRIPTION, 'icon.png')
source = assets.get(ico, {}).get('path', False)
if source:
shutil.copyfile(source, destination)
if kwargs.get('i18n', []):
i18n = kwargs.get('i18n', [])
if i18n:
create_dir([addon_path, I18N])
if kwargs.get('tests', []):
tests = kwargs.get('tests', [])
if tests:
create_dir([addon_path, TESTS])
create_file([addon_path, TESTS, INIT_FILE])
if hasattr(tests, '__iter__'):
for model_class in tests:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model_class,
prefix='test_',
ext='py')
create_file([addon_path, TESTS, model_filename_ext], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, TESTS, model_filename_ext], add_content='from %s.tests import common' % ODOO)
create_file([addon_path, TESTS, model_filename_ext],
add_content='class %s(common.TransactionCase):' % model_class)
create_file([addon_path, TESTS, model_filename_ext], add_content=' def setUp(self):')
create_file([addon_path, TESTS, model_filename_ext],
add_content=' super(%s, self).setUp()' % model_class)
create_file([addon_path, TESTS, INIT_FILE], add_content='from . import %s' % model_filename)
if kwargs.get('controllers', []):
controllers = kwargs.get('controllers', [])
if controllers:
create_dir([addon_path, CONTROLLERS])
create_file([addon_path, INIT_FILE], add_content='from . import %s' % CONTROLLERS)
create_file([addon_path, CONTROLLERS, INIT_FILE], add_content='from . import main')
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE], add_content='import http, registry')
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE],
add_content='from %s.http import request' % ODOO)
if hasattr(controllers, '__iter__'):
for model_class in controllers:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model_class,
ext='py')
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE],
add_content='class %s(http.Controller):' % model_class)
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE],
add_content=' @http.route(\'/%s/index\', type=\'http\', auth="none")' % model_class)
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE],
add_content=' def %s_index(self, **kw):' % model_class)
create_file([addon_path, CONTROLLERS, CONTROLLER_MAIN_FILE],
add_content=' pass # %s' % model_class)
if kwargs.get('models', []):
models = kwargs.get('models')
if models:
create_dir([addon_path, MODELS])
create_file([addon_path, INIT_FILE], add_content='from . import %s' % MODELS)
create_file([addon_path, MODELS, INIT_FILE])
if hasattr(models, '__iter__'):
for model in models:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='py')
create_file([addon_path, MODELS, model_filename_ext], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, MODELS, model_filename_ext],
add_content='from %s import models, fields, api, _' % ODOO)
create_file([addon_path, MODELS, model_filename_ext],
add_content='class %s(models.Model):' % model_class)
create_file([addon_path, MODELS, model_filename_ext], add_content=' _name = \'%s\'' % model_dot)
create_file([addon_path, MODELS, INIT_FILE], add_content='from . import %s' % model_filename)
if kwargs.get('inherit_models', []):
inherit_models = kwargs.get('inherit_models')
if inherit_models:
create_dir([addon_path, MODELS])
create_file([addon_path, INIT_FILE], add_content='from . import %s' % MODELS)
create_file([addon_path, MODELS, INIT_FILE])
if hasattr(inherit_models, '__iter__'):
for model in inherit_models:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='py')
create_file([addon_path, MODELS, model_filename_ext], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, MODELS, model_filename_ext],
add_content='from %s import models, fields, api, _' % ODOO)
create_file([addon_path, MODELS, model_filename_ext],
add_content='class %s(models.Model):' % model_class)
create_file([addon_path, MODELS, model_filename_ext], add_content=' _inherit = \'%s\'' % model_dot)
create_file([addon_path, MODELS, INIT_FILE], add_content='from . import %s' % model_filename)
if kwargs.get('views', []):
views = kwargs.get('views', [])
if views:
create_dir([addon_path, VIEWS])
if hasattr(views, '__iter__'):
for model in views:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='xml')
create_file([addon_path, VIEWS, model_filename_ext],
add_content='<?xml version="1.0" encoding="UTF-8"?>')
create_file([addon_path, VIEWS, model_filename_ext], add_content='<%s>' % ODOO)
create_file([addon_path, VIEWS, model_filename_ext], add_content=' <data>')
create_file([addon_path, VIEWS, model_filename_ext], add_content=' </data>')
create_file([addon_path, VIEWS, model_filename_ext], add_content='</%s>' % ODOO)
add_to_manifest([addon_path, MANIFEST_FILE], [VIEWS, model_filename_ext])
if kwargs.get('inherit_views', []):
inherit_views = kwargs.get('inherit_views', [])
if inherit_views:
create_dir([addon_path, VIEWS])
if hasattr(inherit_views, '__iter__'):
for model in inherit_views:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='xml')
create_file([addon_path, VIEWS, model_filename_ext],
add_content='<?xml version="1.0" encoding="UTF-8"?>')
create_file([addon_path, VIEWS, model_filename_ext], add_content='<%s>' % ODOO)
create_file([addon_path, VIEWS, model_filename_ext], add_content=' <data>')
create_file([addon_path, VIEWS, model_filename_ext], add_content=' </data>')
create_file([addon_path, VIEWS, model_filename_ext], add_content='</%s>' % ODOO)
add_to_manifest([addon_path, MANIFEST_FILE], [VIEWS, model_filename_ext])
if kwargs.get('wizard', []):
wizard = kwargs.get('wizard', [])
if wizard:
create_dir([addon_path, WIZARD])
create_file([addon_path, INIT_FILE], add_content='from . import %s' % WIZARD)
create_file([addon_path, WIZARD, INIT_FILE])
if hasattr(wizard, '__iter__'):
for model in wizard:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='xml')
create_file([addon_path, WIZARD, model_filename_ext],
add_content='<?xml version="1.0" encoding="UTF-8"?>')
create_file([addon_path, WIZARD, model_filename_ext], add_content='<%s>' % ODOO)
create_file([addon_path, WIZARD, model_filename_ext], add_content=' <data>')
create_file([addon_path, WIZARD, model_filename_ext], add_content=' </data>')
create_file([addon_path, WIZARD, model_filename_ext], add_content='</%s>' % ODOO)
add_to_manifest([addon_path, MANIFEST_FILE], [WIZARD, model_filename_ext])
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='py')
create_file([addon_path, WIZARD, model_filename_ext], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, WIZARD, model_filename_ext],
add_content='from %s import models, fields, api, _' % ODOO)
create_file([addon_path, WIZARD, model_filename_ext],
add_content='class %s(models.TransientModel):' % model_class)
create_file([addon_path, WIZARD, model_filename_ext], add_content=' _name = \'%s\'' % model_dot)
create_file([addon_path, WIZARD, INIT_FILE], add_content='from . import %s' % model_filename)
if kwargs.get('report', []):
report = kwargs.get('report', [])
if report:
create_dir([addon_path, VIEWS])
if hasattr(report, '__iter__'):
for model in report:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='xml',
suffix='_report')
create_file([addon_path, VIEWS, model_filename_ext],
add_content='<?xml version="1.0" encoding="UTF-8"?>')
create_file([addon_path, VIEWS, model_filename_ext], add_content='<%s>' % ODOO)
create_file([addon_path, VIEWS, model_filename_ext], add_content=' <data>')
create_file([addon_path, VIEWS, model_filename_ext], add_content=' </data>')
create_file([addon_path, VIEWS, model_filename_ext], add_content='</%s>' % ODOO)
add_to_manifest([addon_path, MANIFEST_FILE], [VIEWS, model_filename_ext])
if kwargs.get('parser', []):
parser = kwargs.get('parser', [])
if parser:
create_dir([addon_path, REPORT])
create_file([addon_path, INIT_FILE], add_content='from . import %s' % REPORT)
create_file([addon_path, REPORT, INIT_FILE])
if hasattr(parser, '__iter__'):
for model in parser:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='py')
create_file([addon_path, REPORT, model_filename_ext], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, REPORT, model_filename_ext],
add_content='from %s import models, fields, api, _' % ODOO)
create_file([addon_path, REPORT, model_filename_ext],
add_content='class %s(models.AbstractModel):' % model_class)
create_file([addon_path, REPORT, model_filename_ext],
add_content=' _name = \'report.%s\'' % model_dot)
create_file([addon_path, REPORT, model_filename_ext], add_content=' @api.model')
create_file([addon_path, REPORT, model_filename_ext],
add_content=' def render_html(self, docids, data=None):', )
create_file([addon_path, REPORT, model_filename_ext],
add_content=' self.model = self.env.context.get(\'active_model\')', )
create_file([addon_path, REPORT, model_filename_ext],
add_content=' docs = self.env[self.model].browse(self.env.context.get(\'active_ids\', []))', )
create_file([addon_path, REPORT, model_filename_ext], add_content=' docargs = {', )
create_file([addon_path, REPORT, model_filename_ext], add_content=' \'doc_ids\': docids,', )
create_file([addon_path, REPORT, model_filename_ext],
add_content=' \'doc_model\': self.model,', )
create_file([addon_path, REPORT, model_filename_ext], add_content=' \'docs\': docs,', )
create_file([addon_path, REPORT, model_filename_ext], add_content=' }', )
create_file([addon_path, REPORT, model_filename_ext],
add_content=' return self.env[\'report\'].render(\'%s\', docargs)' % model_dot, )
create_file([addon_path, REPORT, INIT_FILE], add_content='from . import %s' % model_filename)
if kwargs.get('inherit_wizard', []):
inherit_wizard = kwargs.get('inherit_wizard', [])
if inherit_wizard:
create_dir([addon_path, WIZARD])
create_file([addon_path, INIT_FILE], add_content='from . import %s' % WIZARD)
create_file([addon_path, WIZARD, INIT_FILE])
if hasattr(inherit_wizard, '__iter__'):
for model in inherit_wizard:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='xml')
create_file([addon_path, WIZARD, model_filename_ext],
add_content='<?xml version="1.0" encoding="UTF-8"?>')
create_file([addon_path, WIZARD, model_filename_ext], add_content='<%s>' % ODOO)
create_file([addon_path, WIZARD, model_filename_ext], add_content=' <data>')
create_file([addon_path, WIZARD, model_filename_ext], add_content=' </data>')
create_file([addon_path, WIZARD, model_filename_ext], add_content='</%s>' % ODOO)
add_to_manifest([addon_path, MANIFEST_FILE], [WIZARD, model_filename_ext])
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='py')
create_file([addon_path, WIZARD, model_filename_ext], add_content='# -*- coding: utf-8 -*-')
create_file([addon_path, WIZARD, model_filename_ext],
add_content='from %s import models, fields, api, _' % ODOO)
create_file([addon_path, WIZARD, model_filename_ext],
add_content='class %s(models.TransientModel):' % model_class)
create_file([addon_path, WIZARD, model_filename_ext], add_content=' _inherit = \'%s\'' % model_dot)
create_file([addon_path, WIZARD, INIT_FILE], add_content='from . import %s' % model_filename)
if kwargs.get('data', []):
data = kwargs.get('data', [])
if data:
create_dir([addon_path, DATA])
if hasattr(data, '__iter__'):
for model in data:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model,
ext='xml')
create_file([addon_path, DATA, model_filename_ext],
add_content='<?xml version="1.0" encoding="UTF-8"?>')
create_file([addon_path, DATA, model_filename_ext], add_content='<%s>' % ODOO)
create_file([addon_path, DATA, model_filename_ext], add_content=' <data>')
create_file([addon_path, DATA, model_filename_ext], add_content=' </data>')
create_file([addon_path, DATA, model_filename_ext], add_content='</%s>' % ODOO)
add_to_manifest([addon_path, MANIFEST_FILE], [DATA, model_filename_ext])
if kwargs.get('security', []):
security = kwargs.get('security', [])
if security:
create_dir([addon_path, SECURITY])
create_file([addon_path, SECURITY, SECURITY_FILE],
add_content='id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink')
add_to_manifest([addon_path, MANIFEST_FILE], [SECURITY, SECURITY_FILE])
if hasattr(security, '__iter__'):
for model in security:
model_dot, model_underscore, model_class, model_filename, model_filename_ext = hash_model(model)
create_file([addon_path, SECURITY, SECURITY_FILE],
add_content='access_%s_user,%s.user,model_%s,,1,0,0,0' % (
model_underscore, model_dot, model_underscore))
create_file([addon_path, SECURITY, SECURITY_FILE],
add_content='access_%s_manager,%s.manager,model_%s,,1,1,1,1' % (
model_underscore, model_dot, model_underscore))
click.secho("Addon <%s> patched in the project <%s>" % (addon_slug, project_path), fg='green')
def __clean_depends(deps):
    """Normalise a dependency specification (iterable or separated string) into a list of addon names."""
    if hasattr(deps, '__iter__'):
        return list(deps)
    if isinstance(deps, basestring):
        # normalise every accepted separator to ':' before splitting
        deps = deps.strip().replace(' ', ':').replace(';', ':').replace(',', ':')
        deps = [x.strip().lower() for x in deps.split(':') if x.strip()]
    return deps
def create_dir(paths):
    """Create the directory built from the path segments if it does not exist yet."""
    path = os.path.sep.join(paths)
    if not os.path.exists(path):
        os.makedirs(path)
def add_to_manifest(manifest_paths, file_paths, key='data'):
    """Register a data file (view, security, ...) under the given key of the addon manifest."""
    manifest_file = os.path.sep.join(manifest_paths)
    file_path = '/'.join(file_paths)
    _insert_manifest_item(manifest_file, key, file_path)
def _insert_manifest_item(manifest_file, key, item):
    """ Insert an item in the list of an existing manifest key """
    with codecs.open(manifest_file, encoding='utf-8') as f:
        manifest = f.read()
    if item in eval(manifest).get(key, []):
        return
    pattern = """(["']{}["']:\\s*\\[)""".format(key)
    repl = """\\1\n    '{}',""".format(item)
    # re.MULTILINE must be passed as the "flags" keyword; given positionally it
    # would be interpreted as the "count" parameter of re.sub.
    manifest = re.sub(pattern, repl, manifest, flags=re.MULTILINE)
    with codecs.open(manifest_file, mode='w+', encoding='utf-8') as f:
        f.write(manifest)
def create_file(paths, contents=None, add_content=None, condition=None, test_on_plat=False):
    """Create the file if needed, optionally overwrite it with contents, and append add_content when it is not already present."""
    assert hasattr(paths, '__iter__'), 'paths should be a list or tuple'
    py_file = os.path.sep.join(paths)
    if not os.path.isfile(py_file):
        with codecs.open(py_file, mode='w+', encoding='utf-8') as pyfile:
            pyfile.write('')
    if contents:
        if contents not in open(py_file, 'r').read():
            with codecs.open(py_file, mode='w+', encoding='utf-8') as pyfile:
                pyfile.write(contents)
    content_lines = [x.rstrip() for x in open(py_file, 'r').readlines() if x]
    plat_content = ''.join(content_lines).lower().strip()
    content_to_test = plat_content if test_on_plat else content_lines
    if add_content:
        if (not condition and add_content not in content_to_test) or (condition and condition not in content_to_test):
            with codecs.open(py_file, mode='a+', encoding='utf-8') as pyfile:
                if len(content_lines) > 0 and content_lines[-1].strip():
                    add_content = '\n' + add_content
                pyfile.write(add_content)
@cli.command()
@click.argument('module_name', type=click.STRING, required=False)
@click.pass_context
def addon_patch(ctx, module_name):
"""Create or update an addon"""
all_keys = __get_all_keys(ctx, )
if not ctx.obj['check']('user', 'project'):
click.secho('please provide a user and a project', fg='red')
return
if not module_name and not ctx.obj['check']('addon'):
click.secho('please provide an addon', fg='red')
return
pass_p, msg_p = check_url_and_is_dir(all_keys.get('project_path', ''))
if not pass_p:
click.secho('Project : %s' % msg_p, fg='red')
return
if module_name and module_name != all_keys.get('addon_slug', False):
all_keys['addon_slug'] = module_name
project_addon_path = os.path.sep.join([all_keys.get('project_path', ''), module_name])
pass_a, msg_a = check_url_and_is_addon(project_addon_path)
if not pass_a:
click.secho('Addon : %s' % msg_a, fg='red')
return
if not all_keys.get('addon_slug', False):
click.secho('please provide a name of the addon', fg='red')
return
addon_slug = module_name or all_keys.get('addon_slug')
project_path = all_keys.get('project_path')
os.chdir(project_path)
addon_path = os.path.join(project_path, addon_slug)
all_keys = fix_addon_version(addon_path, all_keys)
fuzzy_keywords = """
Keywords:
models inherit_models views inherit_views wizard inherit_wizard
data controllers security i18n tests icon description readme
js_frontend css_frontend js_backend css_backend xml report parser
"""
click.secho(fuzzy_keywords, fg='blue')
if click.confirm('Continue to patch the addon <%s> in the project "%s" for the version <%s>' % (
addon_slug, project_path, all_keys.get('odoo_version'))):
fuzzy = click.prompt('Enter the fuzzy string')
fuzzy = fuzzy.strip().lower()
to_replace = ' ,:;-/@#&+'
for tr in to_replace:
fuzzy = fuzzy.replace(tr, '=')
fuzzy = [x.strip() for x in fuzzy.split('=') if x]
models = []
groups = {}
item_found = False
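# Illustrative example (hypothetical input): the fuzzy string
#   "res.partner sale.order models views security"
# is split into tokens; model names listed before a keyword are attached to it, giving
#   groups = {'models': [...], 'views': [...], 'security': [...]}
# where each list is ['res.partner', 'sale.order']. A keyword that is not preceded by
# any model name is simply set to True.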
for item in fuzzy:
if item in ['js', 'css']:
click.secho('Please indicate if it is backend or frontend, backend will be used as default',
fg='yellow')
item += '_backend'
if item.startswith('js') and item not in ['js_frontend', 'js_backend']:
item2 = 'js_backend'
click.secho('%s is not recognized, we will use %s' % (item, item2), fg='yellow')
item = item2
if item.startswith('css') and item not in ['css_frontend', 'css_backend']:
item2 = 'css_backend'
click.secho('%s is not recognized, we will use %s' % (item, item2), fg='yellow')
item = item2
if item.strip().lower().replace('_', '').replace('s', '') in ['inheritmodel', 'modelinherit']:
item = 'inherit_models'
if item.strip().lower().replace('_', '').replace('s', '') in ['inheritview', 'viewinherit']:
item = 'inherit_views'
if item.strip().lower().replace('_', '').replace('s', '') in ['inheritwizard', 'wizardinherit']:
item = 'inherit_wizard'
if item in ['models', 'inherit_models', 'views', 'inherit_views', 'wizard', 'inherit_wizard', 'data',
'controllers', 'security', 'i18n', 'tests', 'icon',
'description', 'readme', 'js_frontend', 'css_frontend', 'js_backend', 'css_backend', 'xml',
'report', 'parser']:
if item not in groups:
groups[item] = models[:] or True
else:
if hasattr(groups[item], '__iter__'):
groups[item] += models[:]
item_found = True
else:
if item_found:
models = []
item_found = False
models.append(item)
go_and_patch_addon(all_keys.get('project_path'), all_keys.get('addon_slug'), all_keys, **groups)
else:
click.secho('Exit', fg='red')
@cli.command()
@click.pass_context
def project_patch(ctx):
"""Init or patch a project"""
all_keys = __get_all_keys(ctx)
if not ctx.obj['check']('user', 'project'):
click.secho('please provide a user and a project', fg='red')
return
pass_p, msg_p = check_url_and_is_dir(all_keys.get('project_path', ''))
if not pass_p:
click.secho('Project : %s' % msg_p, fg='red')
return
if not all_keys.get('project_slug', False):
click.secho('please provide a slug for the project', fg='red')
return
project_slug = all_keys.get('project_slug', '')
project_name = all_keys.get('project_name', '')
addons = {
'%s_base' % project_slug: {
'addon_depends': 'base',
'addon_slug': '%s_base' % project_slug,
'addon_name': '%s - Base' % project_name,
'addon_category': 'Tools',
'addon_summary': 'Base module for %s' % project_name,
'addon_description': u"""
The purpose of this module is to:
* Declare all the dependencies on the standard and community Odoo modules
* Be declared as a dependency in every newly created module
* Keep new modules from depending on the standard modules directly: they should depend on this module instead
* Allow the project's modules to be updated by simply updating this module""",
},
'%s_recette' % project_slug: {
'addon_depends': '%s_base' % project_slug,
'addon_slug': '%s_recette' % project_slug,
'addon_name': '%s - Recette' % project_name,
'addon_category': 'Tools',
'addon_summary': 'Acceptance testing (recette) module for %s' % project_name,
'addon_description': u"""
The purpose of this module is to:
* Depend on all the project-specific modules
* Install all of these modules when this module is installed
* Set up the company data""",
'args': {
'data': ['company'],
}
},
'%s_demo' % project_slug: {
'addon_depends': '%s_recette' % project_slug,
'addon_slug': '%s_demo' % project_slug,
'addon_name': u'%s - Démo' % project_name,
'addon_category': 'Tools',
'addon_summary': u'Demonstration module for %s' % project_name,
'addon_description': u"""
The purpose of this module is to:
* Prepare demonstration data""",
'args': {
'data': True,
}
},
}
for addon, additional_keys in addons.iteritems():
all_keys = __get_all_keys(ctx, additional_keys)
go_and_patch_addon(all_keys.get('project_path'), all_keys.get('addon_slug'), all_keys,
**additional_keys.get('args', {}))
readme_path = os.path.join(all_keys.get('project_path'), PROJECT_README_FILE)
if not os.path.isfile(readme_path):
with codecs.open(readme_path, encoding='utf-8', mode='w+') as readme_file:
readme_file.write(render(PROJECT_README_TEMPLATE_FILE, all_keys))
@cli.command()
@click.argument('addon', type=click.STRING, required=False)
@click.option('model', '-m', type=click.STRING, default=[], multiple=True, required=False)
@click.option('field', '-f', type=click.STRING, default=[], multiple=True, required=False)
@click.pass_context
def models(ctx, addon, model, field):
"""Show addon models"""
all_keys = __get_all_keys(ctx)
if not ctx.obj['check']('project'):
click.secho('please provide a project', fg='red')
return
pass_p, msg_p = check_url_and_is_dir(all_keys.get('project_path', ''))
if not pass_p:
click.secho('Project : %s' % msg_p, fg='red')
return
project_path = all_keys.get('project_path')
if addon:
project_path = os.path.join(project_path, addon)
elif not ctx.obj['no_addon'] and all_keys.get('addon_slug', ''):
project_path = os.path.join(project_path, all_keys.get('addon_slug'))
click.secho('Dir to scan is %s' % project_path, fg='blue')
project_dir = odooast.AstDir(project_path)
astobj = odooast.AstFile(project_dir.get_py_files())
ast_models = astobj.get_models(model_args=model, field_args=field)
x = PrettyTable()
x.field_names = ['Model', 'Base Class', 'Inherit']
for f in x.field_names:
x.align[f] = 'l'
for ast_model, ast_model_data in ast_models:
model_classes = ast_model_data.get('classes', [])
model_base_classes = ast_model_data.get('base_classes', [])
model_inherits = ast_model_data.get('inherits', [])
x.add_row([ast_model, ', '.join(model_base_classes), ', '.join(model_inherits)])
click.echo(x)
@cli.command()
@click.argument('addon', type=click.STRING, required=False)
@click.option('model', '-m', type=click.STRING, default=[], multiple=True, required=False)
@click.option('field', '-f', type=click.STRING, default=[], multiple=True, required=False)
@click.pass_context
def fields(ctx, addon, model, field):
"""Show addon fields for all models"""
all_keys = __get_all_keys(ctx)
if not ctx.obj['check']('project'):
click.secho('please provide a project', fg='red')
return
pass_p, msg_p = check_url_and_is_dir(all_keys.get('project_path', ''))
if not pass_p:
click.secho('Project : %s' % msg_p, fg='red')
return
project_path = all_keys.get('project_path')
if addon:
project_path = os.path.join(project_path, addon)
elif not ctx.obj['no_addon'] and all_keys.get('addon_slug', ''):
project_path = os.path.join(project_path, all_keys.get('addon_slug'))
project_dir = odooast.AstDir(project_path)
astobj = odooast.AstFile(project_dir.get_py_files())
ast_models = astobj.get_models(model_args=model, field_args=field)
click.secho('Dir to scan is %s, model=%s, found=%s' % (project_path, model, len(ast_models)), fg='blue')
for ast_model, ast_model_data in ast_models:
click.secho('', fg='blue')
click.secho('Model : %s, Inherits : %s' % (ast_model, ','.join(ast_model_data.get('inherits', []))), fg='blue')
for ast_model_path in ast_model_data.get('paths', []):
click.secho('Path : %s' % ast_model_path, fg='blue')
x = PrettyTable()
x.field_names = ['Field', 'Type', 'Main', 'Required', 'OnChange', 'Compute', 'Inverse', 'Search', 'Store']
for f in x.field_names:
x.align[f] = 'l'
for _field, field_data in ast_model_data.get('fields', {}).iteritems():
field_type = field_data.get('type', '')
field_required = field_data.get('required', '')
field_onchange = field_data.get('onchange', '')
field_compute = field_data.get('compute', '')
field_inverse = field_data.get('inverse', '')
field_search = field_data.get('search', '')
field_store = field_data.get('store', '')
field_relation = field_data.get('comodel_name', '')
if field_type and '2' in str(field_type):
if not field_relation:
field_relation = field_data.get('relation', '')
if not field_relation:
field_relation = field_data.get('without_0', '')
x.add_row([_field, l(field_type), l(field_relation), l(field_required), l(field_onchange), l(field_compute),
l(field_inverse),
l(field_search), l(field_store)])
click.echo(x)
@cli.command()
@click.argument('addon', type=click.STRING, required=False)
@click.option('model', '-m', type=click.STRING, default=[], multiple=True, required=False)
@click.option('func', '-f', type=click.STRING, default=[], multiple=True, required=False)
@click.pass_context
def funcs(ctx, addon, model, func):
"""Show addon functions for all models"""
all_keys = __get_all_keys(ctx)
if not ctx.obj['check']('project'):
click.secho('please provide a project', fg='red')
return
pass_p, msg_p = check_url_and_is_dir(all_keys.get('project_path', ''))
if not pass_p:
click.secho('Project : %s' % msg_p, fg='red')
return
project_path = all_keys.get('project_path')
if addon:
project_path = os.path.join(project_path, addon)
elif not ctx.obj['no_addon'] and all_keys.get('addon_slug', ''):
project_path = os.path.join(project_path, all_keys.get('addon_slug'))
project_dir = odooast.AstDir(project_path)
astobj = odooast.AstFile(project_dir.get_py_files())
ast_models = astobj.get_models(model_args=model, func_args=func)
click.secho('Dir to scan is %s, model=%s, found=%s' % (project_path, model, len(ast_models)), fg='blue')
for ast_model, ast_model_data in ast_models:
click.secho('', fg='blue')
click.secho('Model : %s, Inherits : %s' % (ast_model, ','.join(ast_model_data.get('inherits', []))), fg='blue')
for ast_model_path in ast_model_data.get('paths', []):
click.secho('Path : %s' % ast_model_path, fg='blue')
x = PrettyTable()
x.field_names = ['Func', 'Args']
for f in x.field_names:
x.align[f] = 'l'
for _func, func_data in ast_model_data.get('funcs', {}).iteritems():
x.add_row([_func, l(','.join([str(y) for y in func_data]), 80)])
click.echo(x)
@cli.command()
@click.option('--here', '-h', is_flag=True, type=click.BOOL, default=False)
@click.option('--external', '-e', is_flag=True, type=click.BOOL, default=False)
@click.option('--addon', '-a', type=click.STRING, multiple=True)
@click.option('--recursion', '-r', is_flag=True, type=click.BOOL, default=False)
@click.pass_context
def tree(ctx, here, external, addon, recursion):
"""Show the tree of the addons dependencies"""
filter_addon = addon
all_keys = __get_all_keys(ctx)
if not here:
if not ctx.obj['check']('project'):
click.secho('please provide a project', fg='red')
return
pass_p, msg_p = check_url_and_is_dir(all_keys.get('project_path', ''))
if not pass_p:
click.secho('Project : %s' % msg_p, fg='red')
return
project_path = all_keys.get('project_path')
else:
project_path = os.getcwd()
addons = {}
for root, dirs, files in os.walk(project_path):
for name in files:
file = os.path.join(root, name)
if name in ['__openerp__.py', '__manifest__.py']:
addon = os.path.basename(root)
depends = eval(open(file).read()).get('depends', [])
addons[addon] = depends
arbre = ['root', []]
root = arbre[1]
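# The dependency tree is stored as nested [name, children] pairs: the children of a node X are
# the addons that depend on X, e.g. ['root', [['addon_a', [['addon_b', []]]]]] means that
# addon_b depends on addon_a.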
node_result = None
def get_node(root, item):
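    """Return the children lists of every tree node named item, or [root] when no such node exists (a module-level global is used because Python 2 has no nonlocal)."""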
global node_result
node_result = None
def _get_node(_arbre, item):
global node_result
if _arbre and isinstance(_arbre, list):
if isinstance(_arbre[0], basestring):
if item == _arbre[0]:
if node_result is None:
node_result = []
if _arbre[1] not in node_result:
node_result.append(_arbre[1])
else:
_get_node(_arbre[1], item)
else:
for obj in _arbre:
_get_node(obj, item)
_get_node(root, item)
return node_result if node_result is not None else [root]
external_dependencies = filter(lambda r: r not in addons.keys(), list(set(Operator.flat(addons.values()))))
if external:
for ext_depend in external_dependencies:
root.append([ext_depend, []])
to_process, processed = addons.keys(), []
def rotate(l):
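    # Move the first element of the list to the end: with x == -1, l[-x:] is l[1:] and l[:-x] is l[:1].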
x = -1
return l[-x:] + l[:-x]
conflicts = {}
hits = []
while to_process:
addon, depends = to_process[0], addons[to_process[0]]
len_to_process = len(to_process)
has_dependencies = False
to_rotate, just_rotate = False, False
for depend in depends:
if not external and depend not in addons.keys():
continue
has_dependencies = True
if depend in to_process:
# if addon not in hits:
# hits.append(addon)
# just_rotate = True
# break
conflicts[(addon, depend, len_to_process)] = conflicts.get((addon, depend, len_to_process), 0) + 1
to_rotate = True
break
parents = get_node(root, depend)
item_addon = [addon, []]
for parent in parents:
if item_addon not in parent:
parent.append(item_addon)
if just_rotate:
to_process = rotate(to_process)
continue
if to_rotate:
to_process = rotate(to_process)
for depend in depends:
if conflicts.get((depend, addon, len_to_process), 1) > 1:
Log.warn("Recursion on addons %s <=> %s" % (addon, depend))
to_process.remove(addon)
processed.append(addon)
break
continue
if not has_dependencies:
root.append([addon, []])
to_process.remove(addon)
processed.append(addon)
if recursion:
Log.success('End of verification.', exit=True)
def filter_addons(arbre, whitelist):
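    # Prune every subtree that contains none of the whitelisted addons, so only the branches leading to the requested addons are kept.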
full_list = arbre
to_delete = []
def drop_subtree(full_list, whitelist, to_delete):
values = full_list[1]
for i, value in enumerate(values):
if not (set(whitelist) & set(Operator.flat(value))):
to_delete.append((full_list, values, values[i], i))
else:
for next_item in values:
drop_subtree(next_item, whitelist, to_delete)
drop_subtree(full_list, whitelist, to_delete)
while to_delete:
full_list, values, value, index = to_delete[0]
if value in full_list[1]:
index = full_list[1].index(value)
del full_list[1][index]
del to_delete[0]
return arbre
if filter_addon:
arbre = filter_addons(arbre, filter_addon)
Log.info(format_tree(
arbre, format_node=itemgetter(0), get_children=itemgetter(1)))
@cli.command()
@click.pass_context
def where(ctx):
"""Show project path"""
all_keys = __get_all_keys(ctx)
project_path = all_keys.get('project_path', '')
addon_slug = all_keys.get('addon_slug', '')
click.echo("Project path : %s" % project_path)
click.echo("Addon slug : %s" % addon_slug)
#################### GIT RELATED COMMANDS
def __init_path_with_branches(tmp_path, tmp_branches):
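    """Create one sub-directory per branch under tmp_path, silently skipping those that already exist."""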
for tmp_br in tmp_branches:
tmp_path_branch = os.path.join(tmp_path, tmp_br)
try:
os.makedirs(tmp_path_branch)
except:
pass
def __execute_git_command(cmd, git_url, git_dest, git_branch, git_depth, folder_name=None, git_n=4):
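    """Build and run a git command (clone, checkout, pull, status or log) from git_dest (or git_dest/folder_name), echo its output and return False when git wrote to stderr, True otherwise."""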
git_dest = os.path.normpath(git_dest)
os.chdir(git_dest)
if cmd == 'clone':
cmds = ['git', 'clone', git_url]
if git_depth > 0:
cmds += ['--depth', str(git_depth)]
if git_branch:
cmds += ['-b', git_branch]
if folder_name:
cmds += [folder_name]
elif cmd == 'checkout':
if folder_name:
os.chdir(os.path.join(git_dest, folder_name))
cmds = ['git', 'checkout', git_branch]
elif cmd == 'pull':
if folder_name:
os.chdir(os.path.join(git_dest, folder_name))
cmds = ['git', 'pull', 'origin', git_branch]
if git_depth > 0:
cmds += ['--depth', str(git_depth)]
elif cmd == 'status':
if folder_name:
os.chdir(os.path.join(git_dest, folder_name))
cmds = ['git', 'status']
elif cmd == 'log':
if folder_name:
os.chdir(os.path.join(git_dest, folder_name))
cmds = ['git', 'log', '--pretty=format:%h%x09%x09%ad%x09%s', '--date=iso']
if git_n > 0:
cmds += ['-%s' % git_n]
click.secho('Directory : %s' % os.getcwd(), fg='yellow')
click.secho('URL : %s, Branch : %s' % (git_url, git_branch), fg='yellow')
click.secho('Cmd : %s' % ' '.join(cmds), fg='yellow')
click.echo("-" * 40)
p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = p.communicate()
if err:
click.secho(err, fg='red')
click.echo("-" * 40)
return False
if out:
click.secho(out, fg='green')
click.echo("-" * 40)
click.echo("\n\n")
return True
def __get_git_vars(ctx, repos, path, branch, all, depth, number):
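    """Resolve the repo and path aliases from the saved configuration ('repos' and 'chemins'), prepare one sub-directory per requested branch and return a list of (repo path, branch directory, branch name, depth, repo alias, log count) tuples; returns False (or None) when validation fails."""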
if isinstance(repos, basestring):
repos = repos.replace(',', ';').split(';')
result = []
all_keys = __get_all_keys(ctx)
if not repos:
click.secho('Please provide some repos', fg='red')
return False
for repo in repos:
repo_arg = repo
repo = all_keys.get('repos', {}).get(repo_arg, False)
if not repo:
click.secho('The repo %s is not found' % (repo or repo_arg), fg='red')
return False
if path:
if os.path.isdir(path):
pass
else:
path = all_keys.get('chemins', {}).get(path, {}).get('path', False)
if not path:
click.secho('The path %s is not found' % path, fg='red')
return False
else:
path = all_keys.get('chemins', {}).get('PR', {}).get('path', False)
if not path:
click.secho('The path %s is not found' % path, fg='red')
return
branches = []
for _k, _v in all_keys.get('repos').get(repo_arg).iteritems():
if _k.startswith('branch_'):
_k = _k[7:]
if _v:
branches.append((_k, _v))
if not all:
if not branch:
click.secho('Please specify a branch')
return False
branches = filter(lambda (m, n): m in branch or n in branch, branches)
branch_paths = [x[0] for x in branches]
branch_names = [x[1] for x in branches]
if not os.path.isdir(path):
click.secho('The path %s is not found' % path, fg='red')
return False
__init_path_with_branches(path, branch_paths)
for br_path, br_name in branches:
branch_path = os.path.join(path, br_path)
result.append((repo.get('path'), branch_path, br_name, depth, repo_arg, number))
return result
@cli.command()
@click.argument('cmds', type=click.STRING, required=True)
@click.argument('repos', type=click.STRING, required=True)
@click.argument('path', type=click.STRING, required=False)
@click.option('branch', '-b', type=click.STRING, default=[], multiple=True, required=False)
@click.option('--all', type=click.BOOL, default=False, is_flag=True)
@click.option('--depth', type=click.INT, default=1)
@click.option('-n', '--number', type=click.INT, default=4)
@click.pass_context
def git(ctx, cmds, repos, path, branch, all, depth, number):
"""pull2 a repository"""
cmds = cmds.replace(',', ';').split(';') if isinstance(cmds, basestring) else cmds
results = __get_git_vars(ctx, repos, path, branch, all, depth, number)
if not results: return
for git_path, git_dir, git_branch, git_depth, git_dirname, git_number in results:
for cmd in cmds:
if cmd == 'pull':
__execute_git_command('checkout', git_path, git_dir, git_branch, git_depth, git_dirname, git_number)
__execute_git_command('pull', git_path, git_dir, git_branch, git_depth, git_dirname, git_number)
elif cmd == 'log':
__execute_git_command('log', git_path, git_dir, git_branch, git_depth, git_dirname, git_number)
elif cmd == 'status':
__execute_git_command('status', git_path, git_dir, git_branch, git_depth, git_dirname, git_number)
elif cmd == 'clone':
__execute_git_command('clone', git_path, git_dir, git_branch, git_depth, git_dirname, git_number)
else:
click.secho('The command %s is not implemented' % cmd, fg='red')
if __name__ == '__main__':
cli(obj={})
def main():
return cli(obj={})
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/lib/js/angular-openlayers-directive.min.js | !function(a,b){if("function"==typeof require&&"object"==typeof exports){var c=require("openlayers");exports.angularOpenlayersDirective=b(c)}else"function"==typeof define&&define.amd?define(["ol"],function(c){return a.angularOpenlayersDirective=b(c)}):a.angularOpenlayersDirective=b(a.ol)}(this,function(a){angular.module("openlayers-directive",["ngSanitize"]).directive("openlayers",["$log","$q","$compile","olHelpers","olMapDefaults","olData",function(b,c,d,e,f,g){return{restrict:"EA",transclude:!0,replace:!0,scope:{center:"=olCenter",defaults:"=olDefaults",view:"=olView",events:"=olEvents"},template:'<div class="angular-openlayers-map" ng-transclude></div>',controller:["$scope",function(a){var b=c.defer();a.getMap=function(){return b.promise},a.setMap=function(a){b.resolve(a)},this.getOpenlayersScope=function(){return a}}],link:function(b,c,d){var h=e.isDefined,i=e.createLayer,j=e.setMapEvents,k=e.setViewEvents,l=e.createView,m=f.setDefaults(b);h(d.width)&&(isNaN(d.width)?c.css("width",d.width):c.css("width",d.width+"px")),h(d.height)&&(isNaN(d.height)?c.css("height",d.height):c.css("height",d.height+"px")),h(d.lat)&&(m.center.lat=parseFloat(d.lat)),h(d.lon)&&(m.center.lon=parseFloat(d.lon)),h(d.zoom)&&(m.center.zoom=parseFloat(d.zoom));var n=a.control.defaults(m.controls),o=a.interaction.defaults(m.interactions),p=l(m.view),q=new a.Map({target:c[0],controls:n,interactions:o,renderer:m.renderer,view:p,loadTilesWhileAnimating:m.loadTilesWhileAnimating,loadTilesWhileInteracting:m.loadTilesWhileInteracting});if(b.$on("$destroy",function(){g.resetMap(d.id),q.setTarget(null),q=null}),!d.customLayers){var r={type:"Tile",source:{type:"OSM"}},s=i(r,p.getProjection(),"default");q.addLayer(s),q.set("default",!0)}if(!h(d.olCenter)){var t=a.proj.transform([m.center.lon,m.center.lat],m.center.projection,p.getProjection());p.setCenter(t),p.setZoom(m.center.zoom)}j(m.events,q,b),k(m.events,q,b),b.setMap(q),g.setMap(q,d.id)}}}]),angular.module("openlayers-directive").directive("olCenter",["$log","$location","olMapDefaults","olHelpers",function(b,c,d,e){return{restrict:"A",scope:!1,replace:!1,require:"openlayers",link:function(f,g,h,i){var j=e.safeApply,k=e.isValidCenter,l=e.isDefined,m=e.isArray,n=e.isNumber,o=e.isSameCenterOnMap,p=e.setCenter,q=e.setZoom,r=i.getOpenlayersScope();r.getMap().then(function(f){var g=d.getDefaults(r),i=f.getView(),s=r.center;if(-1!==h.olCenter.search("-"))return b.error('[AngularJS - Openlayers] The "center" variable can\'t use a "-" on his key name: "'+h.center+'".'),void p(i,g.view.projection,g.center,f);l(s)||(s={}),k(s)||(b.warn("[AngularJS - Openlayers] invalid 'center'"),s.lat=g.center.lat,s.lon=g.center.lon,s.zoom=g.center.zoom,s.projection=g.center.projection),s.projection||("pixel"!==g.view.projection?s.projection=g.center.projection:s.projection="pixel"),n(s.zoom)||(s.zoom=1),p(i,g.view.projection,s,f),i.setZoom(s.zoom);if(!0===s.centerUrlHash){var t=function(){var a,b=c.search();if(l(b.c)){var d=b.c.split(":");3===d.length&&(a={lat:parseFloat(d[0]),lon:parseFloat(d[1]),zoom:parseInt(d[2],10)})}return a};t(),r.$on("$locationChangeSuccess",function(){var a=t();a&&!o(a,f)&&j(r,function(b){b.center.lat=a.lat,b.center.lon=a.lon,b.center.zoom=a.zoom})})}var u;r.$watchCollection("center",function(c){if(c){if(c.projection||(c.projection=g.center.projection),c.autodiscover)return u||(u=new 
a.Geolocation({projection:a.proj.get(c.projection)}),u.on("change",function(){if(c.autodiscover){var a=u.getPosition();j(r,function(b){b.center.lat=a[1],b.center.lon=a[0],b.center.zoom=12,b.center.autodiscover=!1,u.setTracking(!1)})}})),void u.setTracking(!0);k(c)||(b.warn("[AngularJS - Openlayers] invalid 'center'"),c=g.center);var d=i.getCenter();if(d)if("pixel"===g.view.projection||"pixel"===c.projection)i.setCenter(c.coord);else{var e=a.proj.transform(d,g.view.projection,c.projection);e[1]===c.lat&&e[0]===c.lon||p(i,g.view.projection,c,f)}i.getZoom()!==c.zoom&&q(i,c.zoom,f)}});var v=f.on("moveend",function(){j(r,function(b){if(l(b.center)){var d=f.getView().getCenter();if(b.center.zoom=i.getZoom(),"pixel"===g.view.projection||"pixel"===b.center.projection)return void(b.center.coord=d);if(b.center){var h=a.proj.transform(d,g.view.projection,b.center.projection);if(b.center.lat=h[1],b.center.lon=h[0],e.notifyCenterUrlHashChanged(r,b.center,c.search()),m(b.center.bounds)){var j=i.calculateExtent(f.getSize()),k=b.center.projection,n=g.view.projection;b.center.bounds=a.proj.transformExtent(j,n,k)}}}})});r.$on("$destroy",function(){a.Observable.unByKey(v)})})}}}]),angular.module("openlayers-directive").directive("olLayer",["$log","$q","olMapDefaults","olHelpers",function(a,b,c,d){return{restrict:"E",scope:{properties:"=olLayerProperties",onLayerCreated:"&"},replace:!1,require:"^openlayers",link:function(a,b,e,f){var g=d.isDefined,h=d.equals,i=f.getOpenlayersScope(),j=d.createLayer,k=d.setVectorLayerEvents,l=d.detectLayerType,m=d.createStyle,n=d.isBoolean,o=d.addLayerBeforeMarkers,p=d.isNumber,q=d.insertLayer,r=d.removeLayer,s=d.addLayerToGroup,t=d.removeLayerFromGroup,u=d.getGroup;i.getMap().then(function(b){var d,f=b.getView().getProjection(),v=c.setDefaults(i),w=b.getLayers();if(a.$on("$destroy",function(){a.properties.group?t(w,d,a.properties.group):r(w,d.index),b.removeLayer(d)}),g(a.properties))a.$watch("properties",function(c,e){if(g(c.source)&&g(c.source.type)){if(!g(c.visible))return void(c.visible=!0);if(!g(c.opacity))return void(c.opacity=1);var i,x,y;if(g(d)){var z=function(a){return function(b){return b!==a}}(d);if(g(e)&&!h(c.source,e.source)){var A=d.index;y=w,x=d.get("group"),x&&(y=u(w,x).getLayers()),y.removeAt(A),d=j(c,f,a.onLayerCreated),d.set("group",x),g(d)&&(q(y,A,d),"Vector"===l(c)&&k(v.events,b,a,c.name))}(g(e)&&c.opacity!==e.opacity||z(d))&&(p(c.opacity)||p(parseFloat(c.opacity)))&&d.setOpacity(c.opacity),g(c.index)&&c.index!==d.index&&(y=w,x=d.get("group"),x&&(y=u(w,x).getLayers()),r(y,d.index),q(y,c.index,d)),g(c.group)&&c.group!==e.group&&(t(w,d,e.group),s(w,d,c.group)),g(e)&&n(c.visible)&&(c.visible!==e.visible||z(d)||d.getVisible()!==c.visible)&&d.setVisible(c.visible),(g(c.style)&&!h(c.style,e.style)||z(d))&&(i=angular.isFunction(c.style)?c.style:m(c.style),d.setStyle&&angular.isFunction(d.setStyle)&&d.setStyle(i)),h(c.minResolution,e.minResolution)&&!z(d)||g(c.minResolution)&&d.setMinResolution(c.minResolution),h(c.maxResolution,e.maxResolution)&&!z(d)||g(c.maxResolution)&&d.setMaxResolution(c.maxResolution)}else 
d=j(c,f,a.onLayerCreated),g(c.group)?s(w,d,c.group):g(c.index)?q(w,c.index,d):o(w,d),"Vector"===l(c)&&k(v.events,b,a,c.name),n(c.visible)&&d.setVisible(c.visible),c.opacity&&d.setOpacity(c.opacity),angular.isArray(c.extent)&&d.setExtent(c.extent),c.style&&(i=angular.isFunction(c.style)?c.style:m(c.style),d.setStyle&&angular.isFunction(d.setStyle)&&d.setStyle(i)),c.minResolution&&d.setMinResolution(c.minResolution),c.maxResolution&&d.setMaxResolution(c.maxResolution)}},!0);else if(g(e.sourceType)&&g(e.sourceUrl)){var x={source:{url:e.sourceUrl,type:e.sourceType}};d=j(x,f,e.layerName,a.onLayerCreated),"Vector"===l(x)&&k(v.events,b,a,e.name),o(w,d)}})}}}]),angular.module("openlayers-directive").directive("olPath",["$log","$q","olMapDefaults","olHelpers",function(a,b,c,d){return{restrict:"E",scope:{properties:"=olGeomProperties",style:"=olStyle"},require:"^openlayers",replace:!0,template:'<div class="popup-label path" ng-bind-html="message"></div>',link:function(a,b,e,f){var g=d.isDefined,h=d.createFeature,i=d.createOverlay,j=d.createVectorLayer,k=d.insertLayer,l=d.removeLayer,m=f.getOpenlayersScope();m.getMap().then(function(d){var f=c.getDefaults(m),n=f.view.projection,o=j(),p=d.getLayers();if(k(p,p.getLength(),o),a.$on("$destroy",function(){l(p,o.index)}),g(e.coords)){var q=e.proj||"EPSG:4326",r=JSON.parse(e.coords),s={type:"Polygon",coords:r,projection:q,style:a.style?a.style:f.styles.path},t=h(s,n);if(o.getSource().addFeature(t),e.message){a.message=e.message;var u=t.getGeometry().getExtent(),v=i(b,u);d.addOverlay(v)}}else;})}}}]),angular.module("openlayers-directive").directive("olView",["$log","$q","olData","olMapDefaults","olHelpers",function(b,c,d,e,f){return{restrict:"A",scope:!1,replace:!1,require:"openlayers",link:function(b,c,d,g){var h=g.getOpenlayersScope(),i=f.isNumber,j=f.safeApply,k=f.createView;h.getMap().then(function(b){var c=e.getDefaults(h),d=h.view;d.projection||(d.projection=c.view.projection),d.maxZoom||(d.maxZoom=c.view.maxZoom),d.minZoom||(d.minZoom=c.view.minZoom),d.rotation||(d.rotation=c.view.rotation);var f=k(d);b.setView(f),h.$watchCollection("view",function(a){i(a.rotation)&&f.setRotation(a.rotation)});var g=f.on("change:rotation",function(){j(h,function(a){a.view.rotation=b.getView().getRotation()})});h.$on("$destroy",function(){a.Observable.unByKey(g)})})}}}]),angular.module("openlayers-directive").directive("olControl",["$log","$q","olData","olMapDefaults","olHelpers",function(a,b,c,d,e){return{restrict:"E",scope:{properties:"=olControlProperties"},replace:!1,require:"^openlayers",link:function(a,b,c,d){var f,g,h=e.isDefined,i=d.getOpenlayersScope(),j=e.getControlClasses,k=j();i.getMap().then(function(b){function d(a){a&&a.control?(f&&b.removeControl(f),f=a.control,b.addControl(f)):c.name&&(h(a)&&(g=a),f&&b.removeControl(f),f=new k[c.name](g),b.addControl(f))}a.$on("$destroy",function(){b.removeControl(f)}),a.$watch("properties",function(a){h(a)&&d(a)}),d(a.properties)})}}}]),angular.module("openlayers-directive").directive("olMarker",["$log","$q","olMapDefaults","olHelpers",function(b,c,d,e){var f=function(){return{projection:"EPSG:4326",lat:0,lon:0,coord:[],show:!0,showOnMouseOver:!1,showOnMouseClick:!1,keepOneOverlayVisible:!1}},g=function(){function a(a){return b.map(function(a){return a.map}).indexOf(a)}var b=[];return{getInst:function(c,d){var f=a(d);if(-1===f){var g=e.createVectorLayer();g.set("markers",!0),d.addLayer(g),b.push({map:d,markerLayer:g,instScopes:[]}),f=b.length-1}return 
b[f].instScopes.push(c),b[f].markerLayer},deregisterScope:function(c,d){var e=a(d);if(-1===e)throw Error("This map has no markers");var f=b[e].instScopes,g=f.indexOf(c);if(-1===g)throw Error("Scope wan't registered");f.splice(g,1),f.length||(d.removeLayer(b[e].markerLayer),delete b[e].markerLayer,delete b[e])}}}();return{restrict:"E",scope:{lat:"=lat",lon:"=lon",label:"=label",properties:"=olMarkerProperties",style:"=olStyle"},transclude:!0,require:"^openlayers",replace:!0,template:'<div class="popup-label marker"><div ng-bind-html="message"></div><ng-transclude></ng-transclude></div>',link:function(c,h,i,j){var k=e.isDefined,l=j.getOpenlayersScope(),m=e.createFeature,n=e.createOverlay,o=h.find("ng-transclude").children().length>0;l.getMap().then(function(e){function j(){c.properties&&(e.getViewport().removeEventListener("mousemove",c.properties.handleInteraction),e.getViewport().removeEventListener("click",c.properties.handleTapInteraction),e.getViewport().querySelector("canvas.ol-unselectable").removeEventListener("touchend",c.properties.handleTapInteraction),e.getViewport().removeEventListener("mousemove",c.properties.showAtLeastOneOverlay),e.getViewport().removeEventListener("click",c.properties.removeAllOverlays),e.getViewport().querySelector("canvas.ol-unselectable").removeEventListener("touchmove",c.properties.activateCooldown))}var p,q,r,s=g.getInst(c,e),t=f(),u=d.getDefaults(l),v=u.view.projection,w=null,x=null;if(c.handleDrag=function(b){var d=b.coordinate,f=e.getView().getProjection().getCode();if(d="pixel"===f?d.map(function(a){return parseInt(a,10)}):a.proj.transform(d,f,"EPSG:4326"),"pointerdown"===b.type){var g=e.forEachFeatureAtPixel(b.pixel,function(a){return a});if(!(x=g?g.get("marker"):null)||!x.draggable)return void(x=null);e.getTarget().style.cursor="pointer",w="pixel"===f?[d[0]-x.coord[0],d[1]-x.coord[1]]:[d[0]-x.lon,d[1]-x.lat],b.preventDefault()}else w&&x&&("pointerup"===b.type?(e.getTarget().style.cursor="",w=null,x=null,b.preventDefault()):"pointerdrag"===b.type&&(b.preventDefault(),c.$apply(function(){"pixel"===f?(x.coord[0]=d[0]-w[0],x.coord[1]=d[1]-w[1]):(x.lon=d[0]-w[0],x.lat=d[1]-w[1])})))},e.on("pointerdown",c.handleDrag),e.on("pointerup",c.handleDrag),e.on("pointerdrag",c.handleDrag),c.$on("$destroy",function(){s.getSource().removeFeature(r),k(p)&&e.removeOverlay(p),g.deregisterScope(c,e),e.un("pointerdown",c.handleDrag),e.un("pointerup",c.handleDrag),e.un("pointerdrag",c.handleDrag),j()}),!k(c.properties))return t.lat=c.lat?c.lat:t.lat,t.lon=c.lon?c.lon:t.lon,t.message=i.message,t.style=c.style?c.style:u.styles.marker,r=m(t,v),k(r)||b.error("[AngularJS - Openlayers] Received invalid data on the marker."),r.set("marker",c),s.getSource().addFeature(r),void((t.message||o)&&(c.message=i.message,q=a.proj.transform([t.lon,t.lat],t.projection,v),p=n(h,q),e.addOverlay(p)));c.$watch("properties",function(d){if(j(),d.handleInteraction=function(b){var c=!1;if(i.hasOwnProperty("ngClick")&&(c=!0),!d.label.show||c){var f=!1,g=e.getEventPixel(b),j=e.forEachFeatureAtPixel(g,function(a){return a}),l=!1;if(j===r){if(l=!0,f=!0,c&&("click"===b.type||"touchend"===b.type))return h.triggerHandler("click"),b.preventDefault(),void b.stopPropagation();k(p)||(q="pixel"===t.projection?d.coord:a.proj.transform([d.lon,d.lat],t.projection,v),p=n(h,q),e.addOverlay(p)),e.getTarget().style.cursor="pointer"}!f&&p&&(l=!0,e.removeOverlay(p),p=void 0,e.getTarget().style.cursor=""),l&&b.preventDefault()}},d.handleTapInteraction=function(){var a,b=!1;return 
d.activateCooldown=function(){b=!0,a&&clearTimeout(a),a=setTimeout(function(){b=!1,a=null},500)},d.activateCooldown&&e.getViewport().querySelector("canvas.ol-unselectable").removeEventListener("touchmove",d.activateCooldown),e.getViewport().querySelector("canvas.ol-unselectable").addEventListener("touchmove",d.activateCooldown),function(){b||(d.handleInteraction.apply(null,arguments),d.activateCooldown())}}(),d.showAtLeastOneOverlay=function(b){if(!d.label.show){var c=!1,f=e.getEventPixel(b),g=e.forEachFeatureAtPixel(f,function(a){return a}),i=!1;g===r&&(i=!0,c=!0,k(p)||(q="pixel"===t.projection?t.coord:a.proj.transform([t.lon,t.lat],t.projection,v),p=n(h,q),angular.forEach(e.getOverlays(),function(a){e.removeOverlay(a)}),e.addOverlay(p)),e.getTarget().style.cursor="pointer"),!c&&p&&(i=!0,p=void 0,e.getTarget().style.cursor=""),i&&b.preventDefault()}},d.removeAllOverlays=function(a){angular.forEach(e.getOverlays(),function(a){e.removeOverlay(a)}),a.preventDefault()},k(r)){var f;if(f="pixel"===d.projection?d.coord:a.proj.transform([d.lon,d.lat],t.projection,e.getView().getProjection()),!angular.equals(r.getGeometry().getCoordinates(),f)){var g=new a.geom.Point(f);r.setGeometry(g)}}else t.projection=d.projection?d.projection:t.projection,t.coord=d.coord?d.coord:t.coord,t.lat=d.lat?d.lat:t.lat,t.lon=d.lon?d.lon:t.lon,k(d.style)?t.style=d.style:t.style=u.styles.marker,r=m(t,v),k(r)||b.error("[AngularJS - Openlayers] Received invalid data on the marker."),r.set("marker",d),s.getSource().addFeature(r);k(p)&&e.removeOverlay(p),k(d.label)&&(c.message=d.label.message,(o||k(c.message)&&0!==c.message.length)&&(d.label&&!0===d.label.show&&(q="pixel"===t.projection?t.coord:a.proj.transform([d.lon,d.lat],t.projection,v),p=n(h,q),e.addOverlay(p)),p&&d.label&&!1===d.label.show&&(e.removeOverlay(p),p=void 0),d.label&&!1===d.label.show&&d.label.showOnMouseOver&&e.getViewport().addEventListener("mousemove",d.handleInteraction),(d.label&&!1===d.label.show&&d.label.showOnMouseClick||i.hasOwnProperty("ngClick"))&&(e.getViewport().addEventListener("click",d.handleTapInteraction),e.getViewport().querySelector("canvas.ol-unselectable").addEventListener("touchend",d.handleTapInteraction)),d.label&&!1===d.label.show&&d.label.keepOneOverlayVisible&&(e.getViewport().addEventListener("mousemove",d.showAtLeastOneOverlay),e.getViewport().addEventListener("click",d.removeAllOverlays))))},!0)})}}}]),angular.module("openlayers-directive").service("olData",["$log","$q",function(a,b){function c(b,c){var d,e;if(angular.isDefined(c))d=c;else if(1===Object.keys(b).length)for(e in b)b.hasOwnProperty(e)&&(d=e);else 0===Object.keys(b).length?d="main":a.error("[AngularJS - Openlayers] - You have more than 1 map on the DOM, you must provide the map ID to the olData.getXXX call");return d}var d={},e=function(a,b){a[c(a,b)].resolvedDefer=!0},f=function(a,d){var e,f=c(a,d);return angular.isDefined(a[f])&&!0!==a[f].resolvedDefer?e=a[f].defer:(e=b.defer(),a[f]={defer:e,resolvedDefer:!1}),e},g=function(a,b){var d=c(a,b);return angular.isDefined(a[d])&&!1!==a[d].resolvedDefer?a[d].defer:f(a,b)};this.setMap=function(a,b){f(d,b).resolve(a),e(d,b)},this.getMap=function(a){return g(d,a).promise},this.resetMap=function(a){angular.isDefined(d[a])&&delete d[a]}}]),angular.module("openlayers-directive").factory("olHelpers",["$q","$log","$http",function(b,c,d){var e=function(a){return angular.isDefined(a)},f=function(a){return angular.isDefined(a)&&null!==a},g=function(a,b,c){a.on(b,function(d){var 
e=d.coordinate,f=a.getView().getProjection().getCode();"pixel"===f&&(e=e.map(function(a){return parseInt(a,10)})),c.$emit("openlayers.map."+b,{coord:e,projection:f,event:d})})},h=["Road","Aerial","AerialWithLabels","collinsBart","ordnanceSurvey"],i=function(){return{attribution:a.control.Attribution,fullscreen:a.control.FullScreen,mouseposition:a.control.MousePosition,overviewmap:a.control.OverviewMap,rotate:a.control.Rotate,scaleline:a.control.ScaleLine,zoom:a.control.Zoom,zoomslider:a.control.ZoomSlider,zoomtoextent:a.control.ZoomToExtent}},j=["osm","sat","hyb"],k=["World_Imagery","World_Street_Map","World_Topo_Map","World_Physical_Map","World_Terrain_Base","Ocean_Basemap","NatGeo_World_Map"],l={style:a.style.Style,fill:a.style.Fill,stroke:a.style.Stroke,circle:a.style.Circle,icon:a.style.Icon,image:a.style.Image,regularshape:a.style.RegularShape,text:a.style.Text},m=function(a,b){return b&&a instanceof b?a:b?new b(a):a},n=function a(b,c){var d;if(c?d=b[c]:(c="style",d=b),"style"===c&&b instanceof Function)return b;if(!(d instanceof Object))return d;var e;if("[object Object]"===Object.prototype.toString.call(d)){e={};var f=l[c];if(f&&d instanceof f)return d;Object.getOwnPropertyNames(d).forEach(function(b,g,h){var i=l[b];if(f&&i&&i.prototype instanceof l[c])return console.assert(1===h.length,"Extra parameters for "+c),e=a(d,b),m(e,i);e[b]=a(d,b),"text"!==b&&"string"!=typeof e[b]&&(e[b]=m(e[b],l[b]))})}else e=d;return m(e,l[c])},o=function(a){if(a.type)return a.type;switch(a.source.type){case"ImageWMS":case"ImageStatic":return"Image";case"GeoJSON":case"JSONP":case"TopoJSON":case"KML":case"WKT":return"Vector";case"TileVector":case"MVT":return"TileVector";default:return"Tile"}},p=function(b){var d;switch(b.projection){case"pixel":if(!e(b.extent))return void c.error("[AngularJS - Openlayers] - You must provide the extent of the image if using pixel projection");d=new a.proj.Projection({code:"pixel",units:"pixels",extent:b.extent});break;default:d=new a.proj.get(b.projection)}return d},q=function(a){return-1!==["watercolor","terrain","toner"].indexOf(a)},r=function(b,f){var g,i,l,m=new a.format.GeoJSON;switch(b.type){case"MapBox":if(!b.mapId||!b.accessToken)return void c.error("[AngularJS - Openlayers] - MapBox layer requires the map id and the access token");l="https://api.tiles.mapbox.com/v4/"+b.mapId+"/{z}/{x}/{y}.png?access_token="+b.accessToken,i=window.devicePixelRatio,i>1&&(l=l.replace(".png","@2x.png")),g=new a.source.XYZ({url:l,tileLoadFunction:b.tileLoadFunction,attributions:t(b),tilePixelRatio:i>1?2:1,wrapX:void 0===b.wrapX||b.wrapX});break;case"MapBoxStudio":if(!b.mapId||!b.accessToken||!b.userId)return void c.error("[AngularJS - Openlayers] - MapBox Studio layer requires the map id, user id and the access token");l="https://api.mapbox.com/styles/v1/"+b.userId+"/"+b.mapId+"/tiles/{z}/{x}/{y}?access_token="+b.accessToken,i=window.devicePixelRatio,i>1&&(l=l.replace("{y}?access_token","{y}@2x?access_token")),g=new a.source.XYZ({url:l,tileLoadFunction:b.tileLoadFunction,attributions:t(b),tilePixelRatio:i>1?2:1,tileSize:b.tileSize||[512,512],wrapX:void 0===b.wrapX||b.wrapX});break;case"MVT":if(!b.url)return void c.error("[AngularJS - Openlayers] - MVT layer requires the source url");g=new a.source.VectorTile({attributions:b.attributions||"",format:new a.format.MVT,tileGrid:a.tilegrid.createXYZ({maxZoom:b.maxZoom||22}),tilePixelRatio:b.tilePixelRatio||16,url:b.url});break;case"ImageWMS":b.url&&b.params||c.error("[AngularJS - Openlayers] - ImageWMS Layer needs valid server url and params 
properties"),g=new a.source.ImageWMS({url:b.url,imageLoadFunction:b.imageLoadFunction,attributions:t(b),crossOrigin:void 0===b.crossOrigin?"anonymous":b.crossOrigin,params:s(b.params),ratio:b.ratio});break;case"TileWMS":(b.url||b.urls)&&b.params||c.error("[AngularJS - Openlayers] - TileWMS Layer needs valid url (or urls) and params properties");var n={tileLoadFunction:b.tileLoadFunction,crossOrigin:void 0===b.crossOrigin?"anonymous":b.crossOrigin,params:s(b.params),attributions:t(b),wrapX:void 0===b.wrapX||b.wrapX};b.serverType&&(n.serverType=b.serverType),b.url&&(n.url=b.url),b.urls&&(n.urls=b.urls),g=new a.source.TileWMS(n);break;case"WMTS":(b.url||b.urls)&&b.tileGrid||c.error("[AngularJS - Openlayers] - WMTS Layer needs valid url (or urls) and tileGrid properties");var o={tileLoadFunction:b.tileLoadFunction,projection:f,layer:b.layer,attributions:t(b),matrixSet:"undefined"===b.matrixSet?f:b.matrixSet,format:"undefined"===b.format?"image/jpeg":b.format,requestEncoding:"undefined"===b.requestEncoding?"KVP":b.requestEncoding,tileGrid:new a.tilegrid.WMTS({origin:b.tileGrid.origin,resolutions:b.tileGrid.resolutions,matrixIds:b.tileGrid.matrixIds}),style:"undefined"===b.style?"normal":b.style,wrapX:void 0===b.wrapX||b.wrapX};e(b.url)&&(o.url=b.url),e(b.urls)&&(o.urls=b.urls),g=new a.source.WMTS(o);break;case"OSM":g=new a.source.OSM({tileLoadFunction:b.tileLoadFunction,attributions:t(b),wrapX:void 0===b.wrapX||b.wrapX}),b.url&&g.setUrl(b.url);break;case"BingMaps":if(!b.key)return void c.error("[AngularJS - Openlayers] - You need an API key to show the Bing Maps.");var p={key:b.key,tileLoadFunction:b.tileLoadFunction,attributions:t(b),imagerySet:b.imagerySet?b.imagerySet:h[0],culture:b.culture,wrapX:void 0===b.wrapX||b.wrapX};b.maxZoom&&(p.maxZoom=b.maxZoom),g=new a.source.BingMaps(p);break;case"MapQuest":if(!b.layer||-1===j.indexOf(b.layer))return void c.error("[AngularJS - Openlayers] - MapQuest layers needs a valid 'layer' property.");g=new a.source.MapQuest({attributions:t(b),layer:b.layer,wrapX:void 0===b.wrapX||b.wrapX});break;case"EsriBaseMaps":if(!b.layer||-1===k.indexOf(b.layer))return void c.error("[AngularJS - Openlayers] - ESRI layers needs a valid 'layer' property.");var r="https://services.arcgisonline.com/ArcGIS/rest/services/",u=r+b.layer+"/MapServer/tile/{z}/{y}/{x}";g=new a.source.XYZ({attributions:t(b),tileLoadFunction:b.tileLoadFunction,url:u,wrapX:void 0===b.wrapX||b.wrapX});break;case"TileArcGISRest":b.url||c.error("[AngularJS - Openlayers] - TileArcGISRest Layer needs valid url"),g=new a.source.TileArcGISRest({attributions:t(b),tileLoadFunction:b.tileLoadFunction,url:b.url,wrapX:void 0===b.wrapX||b.wrapX});break;case"GeoJSON":if(!b.geojson&&!b.url)return void c.error("[AngularJS - Openlayers] - You need a geojson property to add a GeoJSON layer.");if(e(b.url))g=new a.source.Vector({format:new a.format.GeoJSON,url:b.url});else{g=new a.source.Vector;var v,w=f;v=e(b.geojson.projection)?new a.proj.get(b.geojson.projection):f;var x=m.readFeatures(b.geojson.object,{featureProjection:w.getCode(),dataProjection:v.getCode()});g.addFeatures(x)}break;case"WKT":if(!b.wkt&&!b.wkt.data)return void c.error("[AngularJS - Openlayers] - You need a WKT property to add a WKT format vector layer.");g=new a.source.Vector;var y,z=new a.format.WKT;y=e(b.wkt.projection)?new a.proj.get(b.wkt.projection):f;var A=z.readFeatures(b.wkt.data,{featureProjection:f.getCode(),dataProjection:y.getCode()});g.addFeatures(A);break;case"JSONP":if(!b.url)return void c.error("[AngularJS - Openlayers] - You need 
an url properly configured to add a JSONP layer.");e(b.url)&&(g=new a.source.ServerVector({format:m,loader:function(){var a=b.url+"&outputFormat=text/javascript&format_options=callback:JSON_CALLBACK";d.jsonp(a,{cache:b.cache}).success(function(a){g.addFeatures(m.readFeatures(a))}).error(function(a){c(a)})},projection:f}));break;case"TopoJSON":if(!b.topojson&&!b.url)return void c.error("[AngularJS - Openlayers] - You need a topojson property to add a TopoJSON layer.");g=b.url?new a.source.Vector({format:new a.format.TopoJSON,url:b.url}):new a.source.Vector(angular.extend(b.topojson,{format:new a.format.TopoJSON}));break;case"TileJSON":g=new a.source.TileJSON({url:b.url,attributions:t(b),tileLoadFunction:b.tileLoadFunction,crossOrigin:"anonymous",wrapX:void 0===b.wrapX||b.wrapX});break;case"TileVector":b.url&&b.format||c.error("[AngularJS - Openlayers] - TileVector Layer needs valid url and format properties"),g=new a.source.VectorTile({url:b.url,projection:f,attributions:t(b),tileLoadFunction:b.tileLoadFunction,format:b.format,tileGrid:new a.tilegrid.createXYZ({maxZoom:b.maxZoom||19}),wrapX:void 0===b.wrapX||b.wrapX});break;case"TileTMS":b.url&&b.tileGrid||c.error("[AngularJS - Openlayers] - TileTMS Layer needs valid url and tileGrid properties"),g=new a.source.TileImage({url:b.url,maxExtent:b.maxExtent,attributions:t(b),tileLoadFunction:b.tileLoadFunction,tileGrid:new a.tilegrid.TileGrid({origin:b.tileGrid.origin,resolutions:b.tileGrid.resolutions}),tileUrlFunction:function(a){var c=a[0],d=a[1],e=a[2];return d<0||e<0?"":b.url+c+"/"+d+"/"+e+".png"},wrapX:void 0===b.wrapX||b.wrapX});break;case"TileImage":g=new a.source.TileImage({url:b.url,attributions:t(b),tileLoadFunction:b.tileLoadFunction,tileGrid:new a.tilegrid.TileGrid({origin:b.tileGrid.origin,resolutions:b.tileGrid.resolutions}),tileUrlFunction:function(a){var c=a[0],d=a[1],e=-a[2]-1;return b.url.replace("{z}",c.toString()).replace("{x}",d.toString()).replace("{y}",e.toString())},wrapX:void 0===b.wrapX||b.wrapX});break;case"KML":var B=b.extractStyles||!1;g=new a.source.Vector({url:b.url,format:new a.format.KML,radius:b.radius,extractStyles:B});break;case"Stamen":if(!b.layer||!q(b.layer))return void c.error("[AngularJS - Openlayers] - You need a valid Stamen layer.");g=new a.source.Stamen({tileLoadFunction:b.tileLoadFunction,layer:b.layer,wrapX:void 0===b.wrapX||b.wrapX});break;case"ImageStatic":if(!b.url||!angular.isArray(b.imageSize)||2!==b.imageSize.length)return void c.error("[AngularJS - Openlayers] - You need a image URL to create a ImageStatic layer.");g=new a.source.ImageStatic({url:b.url,attributions:t(b),imageSize:b.imageSize,projection:f,imageExtent:b.imageExtent?b.imageExtent:f.getExtent(),imageLoadFunction:b.imageLoadFunction});break;case"XYZ":b.url||b.urls||b.tileUrlFunction||c.error("[AngularJS - Openlayers] - XYZ Layer needs valid url(s) or tileUrlFunction properties"),g=new a.source.XYZ({url:b.url,urls:b.urls,attributions:t(b),minZoom:b.minZoom,maxZoom:b.maxZoom,projection:b.projection,tileUrlFunction:b.tileUrlFunction,tileLoadFunction:b.tileLoadFunction,wrapX:void 0===b.wrapX||b.wrapX});break;case"Zoomify":b.url&&angular.isArray(b.imageSize)&&2===b.imageSize.length||c.error("[AngularJS - Openlayers] - Zoomify Layer needs valid url and imageSize properties"),g=new a.source.Zoomify({url:b.url,size:b.imageSize,wrapX:void 0===b.wrapX||b.wrapX})}return g||c.warn('[AngularJS - Openlayers] - No source could be found for type "'+b.type+'"'),g},s=function(a){var b=a;if(a&&"object"==typeof a){b="[object 
Array]"===Object.prototype.toString.call(a)?[]:{};for(var c in a)b[c]=s(a[c])}return b},t=function(b){var c=[];if(e(b.attribution))!1!==b.attribution&&c.unshift(new a.Attribution({html:b.attribution}));else{var d=u(b);d&&c.unshift(d)}return c},u=function(b){if(b&&b.type){var c=a.source[b.type];if(c)for(var d in c)if(c.hasOwnProperty(d)&&d.toLowerCase().indexOf("attribution")>-1)return a.source[b.type][d]}return null},v=function(b){var c=new a.layer.Group;return c.set("name",b),c},w=function(b,c){var d;return angular.forEach(b,function(b){if(b instanceof a.layer.Group&&b.get("name")===c)return void(d=b)}),d},x=function(a,b){for(var c,d=0;d<a.getLength();d++){if(a.item(d).get("markers")){c=d;break}}if(e(c)){var f=a.item(c);b.index=c,a.setAt(c,b),f.index=a.getLength(),a.push(f)}else b.index=a.getLength(),a.push(b)},y=function(a,b){a.removeAt(b);for(var c=b;c<a.getLength();c++){var d=a.item(c);if(null===d){a.insertAt(c,null);break}d.index=c}};return{isDefined:e,isNumber:function(a){return angular.isNumber(a)},createView:function(b){var c=p(b),d={projection:c,maxZoom:b.maxZoom,minZoom:b.minZoom};return b.center&&(d.center=b.center),b.extent&&(d.extent=b.extent),b.zoom&&(d.zoom=b.zoom),b.resolutions&&(d.resolutions=b.resolutions),new a.View(d)},isDefinedAndNotNull:f,isString:function(a){return angular.isString(a)},isArray:function(a){return angular.isArray(a)},isObject:function(a){return angular.isObject(a)},equals:function(a,b){return angular.equals(a,b)},isValidCenter:function(a){return angular.isDefined(a)&&("boolean"==typeof a.autodiscover||angular.isNumber(a.lat)&&angular.isNumber(a.lon)||angular.isArray(a.coord)&&2===a.coord.length&&angular.isNumber(a.coord[0])&&angular.isNumber(a.coord[1])||angular.isArray(a.bounds)&&4===a.bounds.length&&angular.isNumber(a.bounds[0])&&angular.isNumber(a.bounds[1])&&angular.isNumber(a.bounds[1])&&angular.isNumber(a.bounds[2]))},safeApply:function(a,b){var c=a.$root.$$phase;"$apply"===c||"$digest"===c?a.$eval(b):a.$apply(b)},isSameCenterOnMap:function(b,c){var d=b.projection||"EPSG:4326",e=[b.lon,b.lat],f=c.getView().getProjection(),g=a.proj.transform(c.getView().getCenter(),f,d),h=c.getView().getZoom();return g[1].toFixed(4)===e[1].toFixed(4)&&g[0].toFixed(4)===e[0].toFixed(4)&&h===b.zoom},setCenter:function(b,c,d,e){if(e&&b.getCenter()&&b.animate({duration:150,center:b.getCenter()}),d.projection===c)b.setCenter([d.lon,d.lat]);else{var f=[d.lon,d.lat];b.setCenter(a.proj.transform(f,d.projection,c))}},setZoom:function(a,b,c){a.animate({duration:150,resolution:c.getView().getResolution(),zoom:b}),a.setZoom(b)},isBoolean:function(a){return"boolean"==typeof a},createStyle:n,setMapEvents:function(a,b,c){if(e(a)&&angular.isArray(a.map))for(var d in a.map){var f=a.map[d];g(b,f,c)}},setVectorLayerEvents:function(a,b,c,d){e(a)&&angular.isArray(a.layers)&&angular.forEach(a.layers,function(a){angular.element(b.getViewport()).on(a,function(e){var g=b.getEventPixel(e),h=b.forEachFeatureAtPixel(g,function(a,b){return f(b)&&b.get("name")===d?a:null});f(h)&&c.$emit("openlayers.layers."+d+"."+a,h,e)})})},setViewEvents:function(a,b,c){if(e(a)&&angular.isArray(a.view)){var d=b.getView();angular.forEach(a.view,function(a){d.on(a,function(b){c.$emit("openlayers.view."+a,d,b)})})}},detectLayerType:o,createLayer:function(b,c,d,g){var h,i=o(b),j=r(b.source,c);if(j){"function"!=typeof d||g||(g=d,d=void 0),"Vector"===i&&b.clustering&&(j=new a.source.Cluster({source:j,distance:b.clusteringDistance}));var k={};for(var l in 
b)b.hasOwnProperty(l)&&0!==l.indexOf("$",0)&&0!==l.indexOf("source",0)&&0!==l.indexOf("style",0)&&(k[l]=b[l]);switch(k.source=j,f(b.opacity)&&(k.opacity=b.opacity),f(b.visible)&&(k.visible=b.visible),f(b.extent)&&(k.extent=b.extent),f(b.zIndex)&&(k.zIndex=b.zIndex),f(b.minResolution)&&(k.minResolution=b.minResolution),f(b.maxResolution)&&(k.maxResolution=b.maxResolution),f(b.style)&&"TileVector"===i&&(k.style=b.style),i){case"Image":h=new a.layer.Image(k);break;case"Tile":h=new a.layer.Tile(k);break;case"Heatmap":h=new a.layer.Heatmap(k);break;case"Vector":h=new a.layer.Vector(k);break;case"TileVector":h=new a.layer.VectorTile(k)}if(e(d)?h.set("name",d):e(b.name)&&h.set("name",b.name),e(b.customAttributes))for(var m in b.customAttributes)h.set(m,b.customAttributes[m]);return g&&g({oLayer:h}),h}},createVectorLayer:function(){return new a.layer.Vector({source:new a.source.Vector})},
notifyCenterUrlHashChanged:function(a,b,c){if(b.centerUrlHash){var d=b.lat.toFixed(4)+":"+b.lon.toFixed(4)+":"+b.zoom;e(c.c)&&c.c===d||a.$emit("centerUrlHash",d)}},getControlClasses:i,detectControls:function(a){var b={},c=i();return a.forEach(function(a){for(var d in c)a instanceof c[d]&&(b[d]=a)}),b},createFeature:function(b,c){var d;switch(b.type){case"Polygon":d=new a.geom.Polygon(b.coords);break;default:d=e(b.coord)&&"pixel"===b.projection?new a.geom.Point(b.coord):new a.geom.Point([b.lon,b.lat])}e(b.projection)&&"pixel"!==b.projection&&(d=d.transform(b.projection,c));var f=new a.Feature({geometry:d});if(e(b.style)){var g=n(b.style);f.setStyle(g)}return f},addLayerBeforeMarkers:x,getGroup:w,addLayerToGroup:function(a,b,c){var d=w(a,c);e(d)||(d=v(c),x(a,d)),b.set("group",c),x(d.getLayers(),b)},removeLayerFromGroup:function(a,b,c){var d=w(a,c);b.set("group"),y(d.getLayers(),b.index)},removeLayer:y,insertLayer:function(b,c,d){if(b.getLength()<c){for(;b.getLength()<c;){var e=new a.layer.Image;e.index=b.getLength(),e.name="(null-layer)",b.push(e)}d.index=c,b.push(d)}else{d.index=c,b.insertAt(d.index,d);for(var f=c+1;f<b.getLength();f++){var g=b.item(f);if("(null-layer)"===g.name){b.removeAt(f);break}g.index=f}}},createOverlay:function(b,c){return b.css("display","block"),new a.Overlay({position:c,element:b[0],positioning:"center-left"})}}}]),angular.module("openlayers-directive").factory("olMapDefaults",["$q","olHelpers",function(b,c){var d="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABkAAAApCAYAAADAk4LOAAAGmklEQVRYw7VXeUyTZxjvNnfELFuyIzOabermMZEeQC/OclkO49CpOHXOLJl/CAURuYbQi3KLgEhbrhZ1aDwmaoGqKII6odATmH/scDFbdC7LvFqOCc+e95s2VG50X/LLm/f4/Z7neY/ne18aANCmAr5E/xZf1uDOkTcGcWR6hl9247tT5U7Y6SNvWsKT63P58qbfeLJG8M5qcgTknrvvrdDbsT7Ml+tv82X6vVxJE33aRmgSyYtcWVMqX97Yv2JvW39UhRE2HuyBL+t+gK1116ly06EeWFNlAmHxlQE0OMiV6mQCScusKRlhS3QLeVJdl1+23h5dY4FNB3thrbYboqptEFlphTC1hSpJnbRvxP4NWgsE5Jyz86QNNi/5qSUTGuFk1gu54tN9wuK2wc3o+Wc13RCmsoBwEqzGcZsxsvCSy/9wJKf7UWf1mEY8JWfewc67UUoDbDjQC+FqK4QqLVMGGR9d2wurKzqBk3nqIT/9zLxRRjgZ9bqQgub+DdoeCC03Q8j+0QhFhBHR/eP3U/zCln7Uu+hihJ1+bBNffLIvmkyP0gpBZWYXhKussK6mBz5HT6M1Nqpcp+mBCPXosYQfrekGvrjewd59/GvKCE7TbK/04/ZV5QZYVWmDwH1mF3xa2Q3ra3DBC5vBT1oP7PTj4C0+CcL8c7C2CtejqhuCnuIQHaKHzvcRfZpnylFfXsYJx3pNLwhKzRAwAhEqG0SpusBHfAKkxw3w4627MPhoCH798z7s0ZnBJ/MEJbZSbXPhER2ih7p2ok/zSj2cEJDd4CAe+5WYnBCgR2uruyEw6zRoW6/DWJ/OeAP8pd/BGtzOZKpG8oke0SX6GMmRk6GFlyAc59K32OTEinILRJRchah8HQwND8N435Z9Z0FY1EqtxUg+0SO6RJ/mmXz4VuS+DpxXC3gXmZwIL7dBSH4zKE50wESf8qwVgrP1EIlTO5JP9Igu0aexdh28F1lmAEGJGfh7jE6ElyM5Rw/FDcYJjWhbeiBYoYNIpc2FT/SILivp0F1ipDWk4BIEo2VuodEJUifhbiltnNBIXPUFCMpthtAyqws/BPlEF/VbaIxErdxPphsU7rcCp8DohC+GvBIPJS/tW2jtvTmmAeuNO8BNOYQeG8G/2OzCJ3q+soYB5i6NhMaKr17FSal7GIHheuV3uSCY8qYVuEm1cOzqdWr7ku/R0BDoTT+DT+ohCM6/CCvKLKO4RI+dXPeAuaMqksaKrZ7L3FE5FIFbkIceeOZ2OcHO6wIhTkNo0ffgjRGxEqogXHYUPHfWAC/lADpwGcLRY3aeK4/oRGCKYcZXPVoeX/kelVYY8dUGf8V5EBRbgJXT5QIPhP9ePJi428JKOiEYhYXFBqou2Guh+p/mEB1/RfMw6rY7cxcjTrneI1FrDyuzUSRm9miwEJx8E/gUmqlyvHGkneiwErR21F3tNOK5Tf0yXaT+O7DgCvALTUBXdM4YhC/IawPU+2PduqMvuaR6eoxSwUk75ggqsYJ7VicsnwGIkZBSXKOUww73WGXyqP+J2/b9c+gi1YAg/xpwck3gJuucNrh5JvDPvQr0WFXf0piyt8f8/WI0hV4pRxxkQZdJDfDJNOAmM0Ag8jyT6hz0WGXWuP94Yh2jcfjmXAGvHCMslRimDHYuHuDsy2QtHuIavznhbYURq5R57KpzBBRZKPJi8eQg48h4j8SDdowifdIrEVdU+gbO6QNvRRt4ZBthUaZhUnjlYObNagV3keoeru3rU7rcuceqU1mJBxy+BWZYlNEBH+0eH4vRiB+OYybU2hnblYlTvkHinM4m54YnxSyaZYSF6R3jwgP7udKLGIX6r/lbNa9N6y5MFynjWDtrHd75ZvTYAPO/6RgF0k76mQla3FGq7dO+cH8sKn0Vo7nDllwAhqwLPkxrHwWmHJOo+AKJ4rab5OgrM7rVu8eWb2Pu0Dh4eDgXoOfvp7Y7QeqknRmvcTBEyq9m/HQQSCSz6LHq3z0yzsNySRfMS253wl2KyRDbcZPc
fJKjZmSEOjcxyi+Y8dUOtsIEH6R2wNykdqrkYJ0RV92H0W58pkfQk7cKevsLK10Py8SdMGfXNXATY+pPbyJR/ET6n9nIfztNtZYRV9XniQu9IA2vOVgy4ir7GCLVmmd+zjkH0eAF9Po6K61pmCXHxU5rHMYd1ftc3owjwRSVRzLjKvqZEty6cRUD7jGqiOdu5HG6MdHjNcNYGqfDm5YRzLBBCCDl/2bk8a8gdbqcfwECu62Fg/HrggAAAABJRU5ErkJggg==",e=function(){return{view:{projection:"EPSG:3857",minZoom:void 0,maxZoom:void 0,rotation:0,extent:void 0},center:{lat:0,lon:0,zoom:1,autodiscover:!1,bounds:[],centerUrlHash:!1,projection:"EPSG:4326"},styles:{path:{stroke:{color:"blue",width:8}},marker:{image:new a.style.Icon({anchor:[.5,1],anchorXUnits:"fraction",anchorYUnits:"fraction",opacity:.9,src:d})}},events:{map:[],markers:[],layers:[]},controls:{attribution:!0,rotate:!1,zoom:!0},interactions:{mouseWheelZoom:!1},renderer:"canvas"}},f=c.isDefined,g={};return{getDefaults:function(a){if(!f(a))for(var b in g)return g[b];return g[a.$id]},setDefaults:function(a){var b=a.defaults,c=a.$id,d=e();return f(b)&&(f(b.layers)&&(d.layers=angular.copy(b.layers)),f(b.controls)&&(d.controls=angular.copy(b.controls)),f(b.events)&&(d.events=angular.copy(b.events)),f(b.interactions)&&(d.interactions=angular.copy(b.interactions)),f(b.renderer)&&(d.renderer=b.renderer),f(b.view)&&(d.view.maxZoom=b.view.maxZoom||d.view.maxZoom,d.view.minZoom=b.view.minZoom||d.view.minZoom,d.view.projection=b.view.projection||d.view.projection,d.view.extent=b.view.extent||d.view.extent,d.view.resolutions=b.view.resolutions||d.view.resolutions),f(b.styles)&&(d.styles=angular.extend(d.styles,b.styles)),f(b.loadTilesWhileAnimating)&&(d.loadTilesWhileAnimating=b.loadTilesWhileAnimating),f(b.loadTilesWhileInteracting)&&(d.loadTilesWhileInteracting=b.loadTilesWhileInteracting)),g[c]=d,d}}}])}); | PypiClean |
/CosmoTech_Acceleration_Library-0.3.0.tar.gz/CosmoTech_Acceleration_Library-0.3.0/CosmoTech_Acceleration_Library/Modelops/core/io/model_reader.py | import logging
from CosmoTech_Acceleration_Library.Modelops.core.common.graph_handler import VersionedGraphHandler
from CosmoTech_Acceleration_Library.Modelops.core.utils.model_util import ModelUtil
from redis.commands.graph.query_result import QueryResult
logger = logging.getLogger(__name__)
class ModelReader(VersionedGraphHandler):
"""
Model Reader for cached data
"""
def get_twin_types(self) -> list:
"""
Get twin types
:return: twin types list
"""
return [item for sublist in self.graph.labels() for item in sublist]
def get_twins_by_type(self, twin_type: str, limit: int = 0) -> QueryResult:
"""
Get twins by type
:param twin_type: the twin type requested
        :param limit: the maximum number of twins to retrieve (0 means no limit)
:return: the twin list corresponding to twin type parameter
"""
twin_query = f'MATCH (node:{twin_type}) RETURN node'
if limit != 0:
twin_query = f'{twin_query} LIMIT {str(limit)}'
logger.debug(f"Query : {twin_query}")
return self.graph.query(twin_query, read_only=True)
def get_twin_properties_by_type(self, twin_type: str) -> list:
"""
Get twin properties regarding a twin_type
        Note: this assumes that all twins of the same type share the same set of properties
:param twin_type: the twin type
:return: the properties list
"""
result = []
twin_result = self.get_twins_by_type(twin_type, 1)
result_set = twin_result.result_set
if result_set and result_set[0]:
for key, val in result_set[0][0].properties.items():
if str(key) != ModelUtil.dt_id_key:
result.append(str(key))
else:
result.append(ModelUtil.id_key)
return result
def get_relationship_types(self) -> list:
"""
Get relationship types
:return: relationship types list
"""
return [item for sublist in self.graph.relationship_types() for item in sublist]
def get_relationships_by_type(self, relationship_type: str, limit: int = 0) -> QueryResult:
"""
Get relationships by type
:param relationship_type: the relationship type requested
        :param limit: the maximum number of relationships to retrieve (0 means no limit)
:return: the relationship list corresponding to relationship type parameter
"""
rel_query = f'MATCH (n)-[relation:{relationship_type}]->(m) RETURN n.{ModelUtil.dt_id_key} as {ModelUtil.source_key}, ' \
f'm.{ModelUtil.dt_id_key} as {ModelUtil.target_key}, relation'
if limit != 0:
rel_query = f'{rel_query} LIMIT {str(limit)}'
logger.debug(f"Query : {rel_query}")
return self.graph.query(rel_query, read_only=True)
def get_relationship_properties_by_type(self, relationship_type: str) -> list:
"""
Get relationship properties regarding a relationship_type
        Note: this assumes that all relationships of the same type share the same set of properties
:param relationship_type: the relationship type
:return: the properties list
"""
result = [ModelUtil.source_key, ModelUtil.target_key]
relationship_result = self.get_relationships_by_type(relationship_type, 1)
result_set = relationship_result.result_set
if result_set and result_set[0]:
# relationship
for key, val in result_set[0][2].properties.items():
if not str(key) in result:
if str(key) == ModelUtil.dt_id_key:
result.append(ModelUtil.id_key)
elif str(key) != ModelUtil.src_key and str(key) != ModelUtil.dest_key:
result.append(str(key))
return result
def query(self, query: str, params: dict = None, timeout: int = None, read_only: bool = False) -> QueryResult:
"""
Run specified query
:param query: the query to run
:param params: the parameters for the query if any
:param timeout: a specific timeout
:param read_only: executes a readonly query if set to True
:return: the QueryResult corresponding to specified query
"""
logger.debug(f"Query : {query} with params : {params}")
return self.graph.query(q=query, params=params, timeout=timeout, read_only=read_only)
def exists(self, key) -> bool:
"""
Check if a key exists in Redis
:param key: the key
:return: True if exists else False
"""
return False if self.r.exists(key) == 0 else True | PypiClean |
/ChirpSounder-2.0.4.tar.gz/ChirpSounder-2.0.4/src/chirpsounder/chirp_lib.py | import ctypes
import numpy as n
from ctypes.util import find_library
from numpy import ctypeslib
import matplotlib.pyplot as plt
import scipy.signal as ss
from . import __path__
libdc=ctypes.cdll.LoadLibrary(__path__[0]+"/libdownconvert.cpython-38-x86_64-linux-gnu.so")
libdc.test.argtypes =[ ctypeslib.ndpointer(n.complex64,ndim=1,flags='C'), ctypes.c_int ]
libdc.consume.argtypes =[ ctypes.c_double,
ctypes.c_double,
ctypeslib.ndpointer(n.complex64,ndim=1,flags='C'),
ctypes.c_int,
ctypeslib.ndpointer(n.complex64,ndim=1,flags='C'),
ctypeslib.ndpointer(n.complex64,ndim=1,flags='C'),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_double,
ctypes.c_double,
ctypeslib.ndpointer(n.float32,ndim=1,flags='C'),
ctypes.c_int]
class chirp_downconvert:
def __init__(self,
tab_len=8192,
f0=-12.5e6,
rate=100e3,
dec=2500,
filter_len=2,
n_threads=4,
dt=1.0/25e6):
# let's add a windowed low pass filter to make this nearly perfect.
# normalized cutoff freq
self.n_threads=n_threads
# om0
self.om0=2.0*n.pi/float(dec)
self.dec2=filter_len*dec
self.m=n.array(n.arange(filter_len*dec)-dec,dtype=n.float32)
# windowed low pass filter
self.wfun=n.array(ss.hann(len(self.m))*n.sin(self.om0*(self.m+1e-6))/(n.pi*(self.m+1e-6)),dtype=n.float32)
# the window function could be twice the decimation rate
self.chirpt=0.0
# conjugate sinusoid!
self.sintab=n.array(n.exp(-1j*2.0*n.pi*n.arange(tab_len)/tab_len),dtype=n.complex64)
self.tab_len=tab_len
self.f0=f0
self.rate=rate
self.dec=dec
self.dt=dt
self.filter_len=filter_len
def consume(self,
z_in,
z_out,
n_out):
#void consume(double chirpt, double dt, complex_float *sintab, int tabl, complex_float *in, complex_float *out_buffer, int n_in, int dec, int dec2, double f0, double rate)
if (len(z_in)-self.dec2)/self.dec < n_out:
print("not enough input samples %d %d %d %d"%(len(z_in),self.dec2,self.dec,n_out))
libdc.consume(self.chirpt,
self.dt,
self.sintab,
self.tab_len,
z_in,
z_out,
n_out,
self.dec,
self.dec2,
self.f0,
self.rate,
self.wfun,
self.n_threads)
self.chirpt+=float(n_out*self.dec)*self.dt
def advance_time(self,
n_samples):
self.chirpt+=float(n_samples)*self.dt
def chirp(L,f0=-12.5e6,cr=100e3,sr=25e6):
"""
Generate a chirp.
"""
tv=n.arange(L,dtype=n.float64)/sr
dphase=0.5*tv**2*cr*2*n.pi
chirpv=n.array(n.exp(1j*n.mod(dphase,2*n.pi))*n.exp(1j*2.0*n.pi*f0*tv),dtype=n.complex64)
return(chirpv)
if __name__ == "__main__":
cdc=chirp_downconvert(dec=2500)
# test. this should downconvert to a DC signal
z_in=chirp(L=25000000+5000)
z_out=n.zeros(1000,dtype=n.complex64)
import time
cput0=time.time()
cdc.consume(z_in,z_out,1000)
cput1=time.time()
print((cput1-cput0)/0.1)
import matplotlib.pyplot as plt
plt.plot(z_out.real)
plt.plot(z_out.imag)
plt.savefig("./chirp_lib.pdf") | PypiClean |
/Drupdates-1.5.2.tar.gz/Drupdates-1.5.2/drupdates/updates.py | import os, shutil, yaml, sys
from os.path import expanduser
from string import Template
from drupdates.utils import Utils
from drupdates.settings import Settings
from drupdates.settings import DrupdatesError
from drupdates.constructors.repos import Repos
from drupdates.constructors.reports import Reports
class Updates(object):
""" Run through the working directories and sites updating them. """
def __init__(self):
self.settings = Settings()
self.install()
self.utilities = Utils()
self.working_dirs = self.settings.get('workingDir')
self.single_site = ''
self.alias_file = None
if isinstance(self.working_dirs, str):
self.working_dirs = [self.working_dirs]
# by design, SingleSite setting only works with single working directory
if len(self.working_dirs) == 1:
self.single_site = self.settings.get('singleSite')
def install(self):
""" Basic Installation of Drupdates. """
base_dir = self.settings.get('baseDir')
backup_dir = self.settings.get('backupDir')
dirs = [backup_dir, base_dir]
for directory in dirs:
Utils.check_dir(directory)
current_dir = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(current_dir, "templates/settings.template")
settings_file = os.path.join(Utils.check_dir(base_dir), 'settings.yaml')
instructions_url = "http://drupdates.readthedocs.org/en/latest/setup/"
if not os.path.isfile(settings_file):
shutil.copy(src, settings_file)
msg = "The Settings file {0} was created and needs updated.\n".format(settings_file)
msg += "See {0} for instructions".format(instructions_url)
print(msg)
sys.exit(1)
current_settings = open(settings_file, 'r')
settings = yaml.load(current_settings)
if 'repoDict' in settings and 'example' in settings['repoDict']['value']:
msg = "The default Settings file, {0}, needs updated. \n ".format(settings_file)
msg += "See {0} for instructions".format(instructions_url)
print(msg)
sys.exit(1)
def run_updates(self):
""" Drupdates main function. """
if self.settings.get('debug'):
self.utilities.write_debug_file()
report = {}
for current_working_dir in self.working_dirs:
try:
current_working_dir = Utils.check_dir(current_working_dir)
self.utilities.load_dir_settings(current_working_dir)
update = self.update_sites(current_working_dir)
report[current_working_dir] = update
except DrupdatesError as update_error:
report[current_working_dir] = update_error.msg
if update_error.level >= 30:
break
else:
continue
try:
reporting = Reports()
except DrupdatesError as reports_error:
print("Reporting error: \n {0}".format(reports_error.msg))
sys.exit(1)
reporting.send(report)
def update_sites(self, working_dir):
""" Run updates for a working directory's sites. """
report = {}
self.aliases(working_dir)
blacklist = self.settings.get('blacklist')
sites = Repos().get()
if self.single_site:
sites = {self.single_site : sites[self.single_site]}
for site_name, ssh in sites.items():
if self.settings.get('verbose'):
msg = "Drupdates is working on the site: {0} ...".format(site_name)
print(msg)
report[site_name] = {}
if site_name in blacklist:
continue
self.utilities.load_dir_settings(working_dir)
for phase in self.settings.get("drupdatesPhases"):
mod = __import__('drupdates.' + phase['name'].lower(), fromlist=[phase])
class_ = getattr(mod, phase['name'])
instance = class_(site_name, ssh, working_dir)
result = ''
try:
call = getattr(instance, phase['method'])
result = call()
except DrupdatesError as error:
result = error.msg
if error.level < 30:
break
if error.level >= 30:
msg = "Drupdates: fatal error\n Drupdates returned: {0}".format(result)
raise DrupdatesError(error.level, msg)
finally:
report[site_name][phase['name']] = result
self.settings.reset()
self.delete_files()
return report
def aliases(self, working_dir):
""" Build a Drush alias file in $HOME/.drush, with alises to be used later.
Notes:
The file name is controlled by the drushAliasFile settings
All of the aliases will be prefixed with "drupdates" if the default file name
is retained
"""
alias_file_name = self.settings.get('drushAliasFile')
drush_folder = os.path.join(expanduser('~'), '.drush')
self.alias_file = os.path.join(drush_folder, alias_file_name)
if not os.path.isdir(drush_folder):
try:
os.makedirs(drush_folder)
except OSError as error:
msg = "Could not create ~/.drush folder \n Error: {0}".format(error.strerror)
raise DrupdatesError(30, msg)
current_dir = os.path.dirname(os.path.realpath(__file__))
# Symlink the Drush aliases file
src = os.path.join(current_dir, "templates/aliases.template")
doc = open(src)
template = Template(doc.read())
doc.close()
try:
filepath = open(self.alias_file, 'w')
except OSError as error:
msg = "Could not create {0} file\n Error: {1}".format(self.alias_file, error.strerror)
raise DrupdatesError(30, msg)
webroot_dir = self.settings.get('webrootDir')
filepath.write(template.safe_substitute(path=working_dir,
webroot=webroot_dir))
filepath.close()
def delete_files(self):
""" Clean up files used by Drupdates. """
if os.path.isfile(self.alias_file):
try:
os.remove(self.alias_file)
except OSError as error:
msg = "Clean-up error, couldn't remove {0}\n".format(self.alias_file)
msg += "Error: {1}".format(error.strerror)
print(msg)
return True | PypiClean |
/Dero-0.15.0-py3-none-any.whl/dero/reg/cluster/__init__.py | from typing import List, Optional, Tuple
import itertools
import pandas as pd
from dero.reg.ext_statsmodels import update_statsmodel_result_with_new_cov_matrix
StrOrNone = Optional[str]
StrOrNoneList = List[StrOrNone]
StrOrNoneListList = List[StrOrNoneList]
def estimate_model_handling_cluster(regdf: pd.DataFrame, model, cluster: List[str], **fit_kwargs):
"""
Handles multiway clustering through multiple estimations following
Cameron, Gelbach, and Miller (2011).
"""
cluster_groups = _multiway_cluster_groups(cluster)
if len(cluster_groups) == 0:
raise ValueError(f'did not get any cluster groups, yet cluster was called with {cluster}')
for i, cluster_group in enumerate(cluster_groups):
result = _estimate_model_handling_single_cluster(regdf, model, cluster_group, **fit_kwargs)
cluster_group_cov_matrix = result.cov_params()
if i == 0:
cov_matrix = cluster_group_cov_matrix
else:
# Handle combining the covariance matrices across the different cluster estimations
# Follow eq 2.13 in CGM (2011), where odd number of cluster groups are added
# and even number of cluster groups are subtracted
sign = _negative_one_if_even_positive_one_if_odd(len(cluster_group))
cov_matrix = cov_matrix + (sign * cluster_group_cov_matrix)
# All parameter estimates should be identical, so can just override last result's cov matrix to
# get final result
update_statsmodel_result_with_new_cov_matrix(result, cov_matrix)
return result
def _estimate_model_handling_single_cluster(regdf: pd.DataFrame, model, cluster: List[str], **fit_kwargs):
cluster_ids = _cluster_group_id_series(
regdf,
cluster
)
result = model.fit(cov_type='cluster', cov_kwds={'groups': cluster_ids}, **fit_kwargs)
return result
def _multiway_cluster_groups(cluster_vars: List[str]) -> List[List[str]]:
"""
Transforms cluster_vars into the sets of cluster variables on which to run individual
regressions, following Cameron, Gelbach, and Miller (2011).
"""
cluster_vectors = _cluster_vars_to_cluster_vector_lists(cluster_vars)
all_cluster_groups = []
for group_tuple in itertools.product(*cluster_vectors):
# group_tuple may come with Nones, such as ('Firm', None), or (None, None)
# we only want to extract the non Nones
valid_items = tuple([item for item in group_tuple if item is not None])
if len(valid_items) > 0:
all_cluster_groups.append(valid_items)
# Remove duplicates and convert tuples to lists
all_cluster_groups = [list(group) for group in set(all_cluster_groups)]
return all_cluster_groups
def _cluster_vars_to_cluster_vector_lists(cluster_vars: List[str]) -> StrOrNoneListList:
"""
Transforms cluster_vars into a format which can be used with itertools.product.
E.g. cluster_vars = ['Firm', 'Date'] -> [
['Firm', None],
[None, 'Date']
]
and cluster_vars = ['Firm', 'Date', 'Portfolio'] -> [
['Firm', None, None],
[None, 'Date', None],
[None, None, 'Portfolio']
]
"""
num_items = len(cluster_vars)
all_lists = []
for i, cluster_var in enumerate(cluster_vars):
output_list = [None] * num_items
output_list[i] = cluster_var
all_lists.append(output_list)
return all_lists
def _cluster_group_id_series(df: pd.DataFrame, cluster_vars: List[str]) -> pd.Series:
unique_groups = df[cluster_vars].drop_duplicates()
unique_groups['_group_id'] = range(0, len(unique_groups))
return df[cluster_vars].merge(unique_groups, how='left', on=cluster_vars)['_group_id']
def _negative_one_if_even_positive_one_if_odd(num: int) -> int:
if _is_even(num):
return -1
else:
return 1
def _is_even(num: int) -> bool:
return num % 2 == 0 | PypiClean |
/DeepGMAP-0.2.0.tar.gz/DeepGMAP-0.2.0/deepgmap/data_preprocessing_tools/inputfileGenerator_multiple_label3.py | import numpy as np
import time
import os.path
import multiprocessing
import sys
import errno
import random
#from seq_to_binary import AGCTtoArray2
#import pyximport; pyximport.install()
#import seq_to_binary2
#import seq_to_binary2 as sb2
import importlib as il
sb2=il.import_module("deepgmap.data_preprocessing_tools.seq_to_binary2")
import getopt
import datetime
# Convert DNA sequences into a list of one-hot encoded arrays plus their genomic positions
def seqtobinarydict(file_, lpos, _chr_to_skip="chr2"):
lpos=set(lpos)
binaryDNAdict=[]
binaryDNAdict_append=binaryDNAdict.append
position=[]
position_append=position.append
seqdata=[]
s=0
skip=False
seqlen=0
duration=0.0
i=0
skipped_chr=''
for line in file_:
i+=1
#start=time.time()
if line[0]=='>':
if line.startswith(">"+_chr_to_skip+":"):
if skipped_chr=='':
sys.stdout.write("\r" +"skipping "+_chr_to_skip)
sys.stdout.flush()
skipped_chr=_chr_to_skip
skip=True
else:
a=line.strip('>\n')
if a not in lpos:
skip=True
if not len(seqdata)==0:
binaryDNAdict_append(seqdata)
#position_append(a)
seqdata=[]
continue
else:
skip=False
position_append(a)
if s%100000==0:
sys.stdout.write("\rconverting "+str(a))
sys.stdout.flush()
if not s==0 and not len(seqdata)==0:
#print(seqdata)
binaryDNAdict_append(seqdata)
seqdata=[]
s+=1
elif not line == '\n' and not line=='' and skip==False:
line=line.strip("\n")
seqdata=sb2.AGCTtoArray3(line,len(line))
if not len(seqdata)==0:
binaryDNAdict_append(seqdata)
return binaryDNAdict, position
def seqtobinarydict2(file_, lpos):
lpos=set(lpos)
binaryDNAdict=[]
binaryDNAdict_append=binaryDNAdict.append
position=[]
position_append=position.append
s=0
skip=False
for line in file_:
if line[0]=='>':
a=line.strip('>\n')
if a not in lpos:
skip=True
else:
skip=False
position_append(a)
if s%100000==0:
sys.stdout.write("\rconverting "+str(a))
sys.stdout.flush()
s+=1
elif not line == '\n' and not line=='' and skip==False:
line=line.strip("\n")
binaryDNAdict_append(sb2.AGCTtoArray3(line.encode('utf-8'),len(line)))
return binaryDNAdict, position
def array_saver(ooloop, index_list, binaryDNAdict_shuf,label_list_shuf, sample_num,out_dir):
#print "binaryDNAdict_shuf length under array_saver: "+str(len(binaryDNAdict_shuf))
for i in range(len(index_list)):
#print i*sample_num, (i*sample_num+sample_num), index_list[i]
data_array=np.array(binaryDNAdict_shuf[i*sample_num:(i*sample_num+sample_num)], np.int8)
#print np.sum(data_array)
labels=np.array(label_list_shuf[i*sample_num:(i*sample_num+sample_num)], np.int8)
#print np.shape(labels)
filename = out_dir+"batch_"+str(ooloop)+"_"+str(index_list[i])+".npz"
#print "saving "+str(filename)
try:
with open(filename, "wb") as output_file:
np.savez_compressed(output_file,labels=labels, data_array=data_array)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
except ValueError:
print("Could not convert data")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
def array_saver_one_by_one(index, binaryDNAdict_shuf,label_list_shuf,out_dir):
filename = out_dir+"batch_"+str(index)+".npz"
try:
with open(filename, "wb") as output_file:
np.savez_compressed(output_file,labels=label_list_shuf, data_array=binaryDNAdict_shuf)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
except ValueError:
print("Could not convert data")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
def dicttoarray(binaryDNAdict,position, label_list,label_position,reduce_genome):
num_seq=len(binaryDNAdict)
x=0
y=0
shuf=range(num_seq)
random.shuffle(shuf)
binaryDNAdict_shuf=[]
binaryDNAdict_shuf_append=binaryDNAdict_shuf.append
label_list_shuf=[]
label_list_shuf_append=label_list_shuf.append
k=0
for i in shuf:
d=binaryDNAdict[i]
l=label_list[i]
dp=position[i]
lp=label_position[i]
r=random.random()
#print r, sum(l), reduce_genome
if sum(l)==0 and r<=reduce_genome:
#print k
k+=1
continue
else:
#print dp, lp
assert dp==lp, "position does not match: %r" %[dp, lp]
binaryDNAdict_shuf_append(d)
label_list_shuf_append(l)
if sum(l)==0:
x+=1
else:
y+=1
prog=100.0*float(k+y+x)//num_seq
if prog%10.0==0.0:
print(prog)
z=float(x)/float(y+x)
print(str(k)+" of negative sequences are skipped\n"+"negative/total="+str(z))
return binaryDNAdict_shuf, label_list_shuf
import gc
def main():
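    # Illustrative invocation (added comment; the file names and the -r value are
    # hypothetical, the flags mirror the getopt definition below):
    #   python inputfileGenerator_multiple_label3.py -i genome_windows.fa \
    #       -l labeled_regions.tsv -o output_dir/ -p 8 -s 100 -r 0.9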
start=time.time()
try:
options, args =getopt.getopt(sys.argv[1:], 'i:l:o:p:s:r:', ['input_dir=','label=', 'output_dir=','process=','sample_number=','reduce_genome='])
except getopt.GetoptError as err:
print(str(err))
sys.exit(2)
if len(options)<3:
        print('too few arguments')
sys.exit(0)
sample_num=100
threads=10
reduce_genome=60
for opt, arg in options:
if opt in ('-i', '--input_dir'):
input_dir=arg
elif opt in ('-l', '--label'):
labeled_genome=arg
elif opt in ('-o', '--output_dir'):
output_dir=arg
elif opt in ('-p', '--process'):
threads=int(arg)
elif opt in ('-s', '--sample_number'):
sample_num=int(arg)
elif opt in ('-r', '--reduce_genome'):
reduce_genome=float(arg)
with open(labeled_genome, 'r') as f2:
lines=f2.readlines()
label_position, label_list=sb2.label_reader(lines, "chr2")
with open(input_dir, 'r') as f1:
        binaryDNAdict, position=seqtobinarydict(f1, label_position)
"""
np_stock=input_dir+".h5"
if not os.path.exists(np_stock):
with open(input_dir, 'r') as f1:
binaryDNAdict, position=seqtobinarydict(f1)
h5f = h5py.File(input_dir+'.h5', 'w')
h5f.create_dataset('binaryDNAdict', data=binaryDNAdict, chunks=True)
h5f.create_dataset('position', data=position, chunks=True)
h5f.close()
#np.savez_compressed(input_dir, binaryDNAdict=binaryDNAdict, position=position)
else:
np_restore= h5py.File(np_stock,'r')
binaryDNAdict=np_restore["binaryDNAdict"][:]
position=np_restore["position"][:]
np_restore.close()"""
try:
if not os.path.exists(output_dir):
try:
os.makedirs(output_dir)
except OSError as exc:
                if exc.errno != errno.EEXIST:
raise
binaryDNAdict_shuf, label_list_shuf=dicttoarray(binaryDNAdict,position, label_list,label_position,reduce_genome)
dna_dict_length=len(binaryDNAdict_shuf)
print(len(label_list_shuf), dna_dict_length)
if dna_dict_length%threads==0:
batch=dna_dict_length//threads
else:
batch=dna_dict_length//threads+1
if dna_dict_length%sample_num==0:
total_num=dna_dict_length//(sample_num*threads)
else:
total_num=dna_dict_length//(sample_num*threads)+1
jobs = []
for i in range(threads):
#print str(len(binaryDNAdict_shuf[i*batch:(i+1)*batch]))+" are passed"
jobs.append(multiprocessing.Process(target=array_saver,
args=(range(i*total_num,(i+1)*total_num),
binaryDNAdict_shuf[i*batch:(i+1)*batch],
label_list_shuf[i*batch:(i+1)*batch],
sample_num, output_dir,)))
for j in jobs:
j.start()
for j in jobs:
j.join()
print("still working on something...")
except:
print("Unexpected error: "+str(sys.exc_info()[0]))
raise
gc.collect()
running_time=time.time()-start
print("Done!"+"\nTotal time: "+ str(datetime.timedelta(seconds=running_time)))
if __name__== '__main__':
main() | PypiClean |
/GraTeLPy-0.2.0.1.tar.gz/GraTeLPy-0.2.0.1/gratelpy/drawing.py | import networkx as nx
from networkx.algorithms import bipartite
try:
from matplotlib import pyplot as plt
import matplotlib
except ImportError:
print 'The drawing module requires matplotlib. Please install matplotlib.'
raise
def gratelpy_draw(G, positions=None, dictionary_complexes=None, dictionary_reactions=None, filename=None, subgraph=None, rnsize = 1600, cnsize = 1400):
# draws entire graph or subgraph (subgraph being a fragment)
# squares for reaction nodes, circles for complex nodes
# inscribes s, w (complex and reaction nodes) labels into nodes
# positions dictionary expected of the form {'si': [x_pos, y_pos], 'sj': ...}
# supplied dictionary_complexes expected of the form {i: 'descriptive name of s(i+1)'}
# supplied dictionary_reactions expected of the form {i: 'descriptive name of w(i+1)'}
# filename expected as string including prefix
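    # Illustrative call (added comment; the names are hypothetical and a bipartite
    # graph G with 'bipartite' node attributes is assumed, as built elsewhere in GraTeLPy):
    #   fig = gratelpy_draw(G, positions=pos,
    #                       dictionary_complexes={0: 'A1', 1: 'A2'},
    #                       dictionary_reactions={0: 'w1', 1: 'w2'})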
font = {'family' : 'sans-serif','sans-serif':['Helvetica'],
'weight' : 'normal',
'size' : 26}
matplotlib.rc('font', **font)
# generate positions of nodes if not supplied
if positions is None:
positions = nx.spring_layout(G)
# generate and modify figure
fig = plt.figure(num=None, figsize=(20,10), dpi=80, facecolor='w', edgecolor='k')
fig_axis = fig.add_subplot(111)
fig_axis.axis('off')
# generate separate graphs for both complexes and reactions so we can draw them differently
substance_nodes = []
reaction_nodes = []
for n in G.nodes():
if G.node[n]['bipartite']==0 and n not in substance_nodes:
substance_nodes.append(n)
if G.node[n]['bipartite']==1 and n not in reaction_nodes:
reaction_nodes.append(n)
substance_graph = nx.DiGraph()
substance_graph.add_nodes_from(substance_nodes)
reaction_graph = nx.DiGraph()
reaction_graph.add_nodes_from(reaction_nodes)
# if drawing subgraph, then generate specifically edges that are to be displayed
if subgraph is not None:
edges_graph = nx.DiGraph()
edges_graph.add_nodes_from(substance_nodes+reaction_nodes)
for el in subgraph:
if len(el) == 2:
# edge
edges_graph.add_edge(el[0], el[1])
else:
if el[-1] == 'p':
edges_graph.add_edge(el[0], el[1])
edges_graph.add_edge(el[1], el[2])
elif el[-1] == 'n':
edges_graph.add_edge(el[0], el[1])
edges_graph.add_edge(el[2], el[1])
else:
raise
else:
edges_graph = None
# generate complex labels
if dictionary_complexes is None:
complex_labels = {}
for n in substance_graph.nodes():
complex_labels[n] = str(n)
else:
complex_labels = {}
for n in substance_graph.nodes():
complex_labels[n] = dictionary_complexes[int(n[1:])-1].translate(None, '[]')
# generate reaction labels
if dictionary_reactions is None:
reaction_labels = {}
for n in reaction_graph.nodes():
reaction_labels[n] = str(n)
else:
reaction_labels = {}
for n in reaction_graph.nodes():
reaction_labels[n] = dictionary_reactions[int(n[1:])-1].translate(None, '[]')
# draw substance and reaction nodes
nx.draw_networkx_nodes(substance_graph, positions, node_shape='o', node_size=cnsize, node_color='white')
nx.draw_networkx_nodes(reaction_graph, positions, node_shape='s', node_size=rnsize, node_color='white')
if subgraph is None:
nx.draw_networkx_edges(G, positions, width=2)
else:
nx.draw_networkx_edges(edges_graph, positions, width=2)
nx.draw_networkx_labels(substance_graph, positions, complex_labels, font_size=26)
nx.draw_networkx_labels(reaction_graph, positions, reaction_labels, font_size=26)
return fig | PypiClean |
/Digikam-DB-0.3.1.tar.gz/Digikam-DB-0.3.1/digikamdb/image_comments.py | import logging
import os
from datetime import datetime
from itertools import groupby
from typing import Iterable, List, Optional, Sequence, Tuple, Union
from sqlalchemy import Column, Integer, String, delete, text
from sqlalchemy.orm import relationship, validates
from .table import DigikamTable
from .properties import BasicProperties
log = logging.getLogger(__name__)
def _imageproperty_class(dk: 'Digikam'): # noqa: F821
return dk.images.ImageComment
class ImageComments(BasicProperties):
"""
Encapsulates ImageComments (caption and title).
Args:
parent: Corresponding Image object
type\\_: Comment type (1 for caption, 3 for title)
"""
    #: Function returning the table class
_class_function = _imageproperty_class
#: Parent id column
_parent_id_col = '_imageid'
#: Key column
_key_col = ['_language', '_author']
#: Value column
_value_col = ['_comment', '_date']
#: Return ``None`` when select does not find a row
_raise_on_not_found = False
#: Remove item when set to ``None``
_remove_on_set_none = True
def __init__(self, parent: 'Image', type_: int): # noqa: F821
super().__init__(parent)
self._type = type_
def __repr__(self) -> str: # pragma: no cover
return '<ImageComments object type=%d>' % self._type
def _select_self(self) -> '~sqlalchemy.orm.Query': # noqa: F821
"""
        Selects all comments with the right type for the parent object.
"""
return super()._select_self().filter_by(_type = self._type)
def _prop_attributes(self, prop: Union[str, int, Iterable, None], **values):
"""Adds type to the standard properties."""
values['_type'] = self._type
return super()._prop_attributes(prop, **values)
def _pre_process_key(self, prop: Union[str, Iterable, None]) -> Tuple:
"""Preprocesses key for [] operations."""
ret = list(super()._pre_process_key(prop))
if ret[0] is None or ret[0] == '':
ret[0] = 'x-default'
return tuple(ret)
class ImageTitles(ImageComments):
"""
Enables access to multilingual image titles.
Objects of this type are normally accessed through an :class:`Image`
object, see :attr:`~Image.titles`. In general, it is not necessary to
call the constructor directly.
Titles can be multilingual. Individual languages can be retrieved from the
Titles object like from a dict where the key is a string containing the
language. The language can be given as ``None`` or as an empty string,
both are replaced internally by **x-default**.
.. code-block:: python
c1 = img.titles[''] # Default language
c2 = img.titles['es-ES'] # Spanish
c3 = img.titles[None] # Default language
img.titles[''] = 'Some text' # sets the default title
Args:
parent: Image object the title belongs to.
"""
#: Value column
_value_col = '_comment'
def __init__(self, parent: 'Image'): # noqa: F821
# set type=3
super().__init__(parent, 3)
def __repr__(self) -> str: # pragma: no cover
return '<Titles for image %d>' % self._parent.id
def _post_process_key(self, key: Union[str, Tuple]) -> str:
"""Just remove author"""
key = super()._post_process_key(key)
if isinstance(key, tuple):
return key[0]
return key # pragma: no cover
def _post_process_value(self, value: 'DigikamObject') -> Tuple: # noqa: F821
"""Preprocesses values for [] operations."""
value = super()._post_process_value(value)
if isinstance(value, tuple):
return value[0] # pragma: no cover
return value
class ImageCaptions(ImageComments):
"""
Contains an image's captions.
An Image can have multiple captions: by different authors and in different
languages. Individual captions can be retrieved from the Captions object
like from a dict where the keys are either a string (containing the
language, the author defaults to ``None`` in this case) or a tuple
containing language and author. The language can be given as ``None`` or as
an empty string, both are replaced internally by **x-default**.
.. code-block:: python
c1 = img.captions[('', 'Fred')] # Default language, author Fred
c2 = img.captions['es-ES'] # Spanish, no author
c3 = img.captions[None] # Default language, no author
c4 = img.captions[('de-DE', 'Ralph')] # German, author Ralph
img.captions[''] = 'Some text' # sets the default caption
The caption's value is a tuple containing the caption text and the
caption's date. When setting the value, just the text can be given, and
the date will be set to ``None``
Args:
parent: Image object the title belongs to.
"""
def __init__(self, parent: 'Image'): # noqa: F821
# set type=1
super().__init__(parent, 1)
def __repr__(self) -> str: # pragma: no cover
return '<Captions for image %d>' % self._parent.id | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/plugins/internal/change_warn.py | import sys
from loguru import logger
from flexget import plugin
from flexget.event import event
logger = logger.bind(name='change')
found_deprecated = False
class ChangeWarn:
"""
Gives warning if user has deprecated / changed configuration in the root level.
Will be replaced by root level validation in the future!
Contains ugly hacks, better to include all deprecation warnings here during 1.0 BETA phase
"""
def on_task_start(self, task, config):
global found_deprecated
if 'torrent_size' in task.config:
logger.critical('Plugin torrent_size is deprecated, use content_size instead')
found_deprecated = True
if 'nzb_size' in task.config:
logger.critical('Plugin nzb_size is deprecated, use content_size instead')
found_deprecated = True
if found_deprecated:
task.manager.shutdown(finish_queue=False)
task.abort('Deprecated config.')
@event('plugin.register')
def register_plugin():
plugin.register(ChangeWarn, 'change_warn', builtin=True, api_ver=2)
@event('manager.startup')
def startup_warnings(manager):
if sys.version_info < (3, 7):
logger.warning(
"Python 3.6 is EOL as of December 2021. FlexGet will remove support for it soon. "
"Please upgrade to python 3.7 or later."
)
# check that no old plugins are in pre-compiled form (pyc)
try:
import os.path
plugin_dirs = (
os.path.normpath(sys.path[0] + '/../flexget/plugins/'),
os.path.normpath(sys.path[0] + '/../flexget/plugins/input/'),
)
for plugin_dir in plugin_dirs:
for name in os.listdir(plugin_dir):
require_clean = False
if name.startswith('module'):
require_clean = True
if name == 'csv.pyc':
require_clean = True
if 'resolver' in name:
require_clean = True
if 'filter_torrent_size' in name:
require_clean = True
if 'filter_nzb_size' in name:
require_clean = True
if 'module_priority' in name:
require_clean = True
if 'ignore_feed' in name:
require_clean = True
if 'module_manual' in name:
require_clean = True
if 'output_exec' in name:
require_clean = True
if 'plugin_adv_exec' in name:
require_clean = True
if 'output_transmissionrpc' in name:
require_clean = True
if require_clean:
logger.critical('-' * 79)
logger.critical('IMPORTANT: Your installation has some files from older FlexGet!')
logger.critical('')
logger.critical(
' Please remove all pre-compiled .pyc and .pyo files from {}', plugin_dir
)
logger.critical(' Offending file: {}', name)
logger.critical('')
logger.critical(
' After getting rid of these FlexGet should run again normally'
)
logger.critical('')
logger.critical('-' * 79)
found_deprecated = True
break
except Exception:
pass | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/profile.py | from euphorie.client import model
from euphorie.client.utils import HasText
from euphorie.content.interfaces import ICustomRisksModule
from euphorie.content.interfaces import IQuestionContainer
from euphorie.content.module import IModule
from euphorie.content.profilequestion import IProfileQuestion
from euphorie.content.risk import IFrenchRisk
from euphorie.content.risk import IRisk
from sqlalchemy import sql
from z3c.saconfig import Session
always_present_default = "no"
def AddToTree(
root,
node,
zodb_path=[],
title=None,
profile_index=0,
skip_children=False,
):
"""Add a new node to the session tree.
:param root: parent node of the new child
:type root: py:class:`euphorie.client.model.SurveySession` or
:py:class:`euphorie.client.model.SurveyTreeItem`
:param node: ZODB object to add to the node.
:type node: one of the :py:mod:`euphorie.content` content types
:param zodb_path: list of ids of all parents of the root in the session
tree
:param title: title for the generated node. Defaults to the title of the
ZODB object
:type title: unicode or None
:param int profile_index: profile answer index number.
"""
title = title or node.title
if title:
title = title[:500]
if IQuestionContainer.providedBy(node):
child = model.Module(title=title, module_id=node.id)
child.has_description = HasText(node.description)
if IModule.providedBy(node):
child.solution_direction = HasText(node.solution_direction)
# All optional modules default to "skip". The user needs to
# actively decide that the module is relevant for them.
if node.optional:
child.skip_children = True
child.has_description = True
else:
child.postponed = False
elif IRisk.providedBy(node):
priority = getattr(node, "default_priority", None)
if priority == "none":
priority = None
if IFrenchRisk.providedBy(node):
effect = node.default_severity
else:
effect = node.default_effect
child = model.Risk(
title=title,
risk_id=node.id,
risk_type=node.type,
skip_evaluation=(node.evaluation_method == "fixed"),
probability=node.default_probability,
frequency=node.default_frequency,
effect=effect,
priority=priority,
)
child.skip_children = False
child.postponed = False
child.has_description = HasText(node.description)
if node.type in ["top5", "policy"]:
child.priority = "high"
if node.risk_always_present and always_present_default:
child.identification = always_present_default
else:
return None # Should never happen
zodb_path = zodb_path + [node.id]
child.zodb_path = "/".join(zodb_path)
child.profile_index = profile_index
root.addChild(child)
if IQuestionContainer.providedBy(node) and not skip_children:
for grandchild in node.values():
AddToTree(child, grandchild, zodb_path, None, profile_index)
return child
def get_custom_risks(session):
if session is None:
return []
query = (
Session.query(model.Risk)
.filter(
sql.and_(
model.Risk.is_custom_risk == True, # noqa: E712
model.Risk.path.startswith(model.Module.path),
model.Risk.session_id == session.id,
)
)
.order_by(model.Risk.id)
)
return query.all()
def BuildSurveyTree(survey, profile, dbsession, old_session=None):
"""(Re)build the survey SQL tree. The existing tree for the session is
deleted before a new tree is created.
:param survey: survey to build tree for
:type survey: :py:class:`euphorie.content.survey.Survey`
:param profile: desired profile to be used for the tree
:type profile: dictionary
:param dbsession: session to build tree in. Defaults to currently active
session.
:type dbsession: :py:class:`euphorie.client.model.SurveySession`
"""
dbsession.reset()
for child in survey.values():
if ICustomRisksModule.providedBy(child):
AddToTree(dbsession, child)
# Now copy over the custom risks
risks = get_custom_risks(old_session)
if risks:
# find the module that holds the custom risks
modules = (
Session.query(model.Module)
.filter(
sql.and_(
model.Module.session_id == dbsession.id,
model.Module.module_id == child.id,
)
)
.all()
)
# there should only ever be 1 result
if modules:
for risk in risks:
modules[0].addChild(risk)
elif IProfileQuestion.providedBy(child):
# Safeguard against double adding of profile questions
existing = [getattr(item, "module_id") for item in dbsession.children()]
if child.id in existing:
continue
p = profile.get(child.id)
if not p:
continue
if isinstance(p, list):
profile_question = AddToTree(
dbsession,
child,
title=child.title,
profile_index=-1,
skip_children=True,
)
for index, title in enumerate(p):
AddToTree(profile_question, child, title=title, profile_index=index)
# If we get a bool, it will be True, because of `if not p` above
# Simply add the profile to the tree, don't care about locations
elif isinstance(p, bool):
AddToTree(dbsession, child, title=child.title)
else:
AddToTree(dbsession, child)
def extractProfile(survey, survey_session):
"""Determine the current profile for given current survey session.
:param survey: current survey
:type survey: :py:class:`euphorie.content.survey.Survey`
:param survey_session: current survey session
:type survey_session: :py:class:`euphorie.client.model.SurveySession`
:rtype: dictionary with profile answers
    The profile is returned as a dictionary. The ids of the profile questions
    are used as keys. For optional profile questions the value is a boolean.
    For repeatable profile questions the value is a list of titles as provided
by the user. This format is compatible with
:py:meth:`Profile.getDesiredProfile`.
"""
questions = [
{"id": child.id, "use_location_question": child.use_location_question}
for child in survey.ProfileQuestions()
]
if not questions:
return {}
q_ids = [q["id"] for q in questions]
session_modules = {}
query = (
Session.query(model.SurveyTreeItem.zodb_path, model.SurveyTreeItem.title)
.filter(model.SurveyTreeItem.type == "module")
.filter(model.SurveyTreeItem.session == survey_session)
.filter(model.SurveyTreeItem.profile_index >= 0)
.filter(model.SurveyTreeItem.zodb_path.in_(q_ids))
.order_by(model.SurveyTreeItem.profile_index)
)
for row in query:
session_modules.setdefault(row.zodb_path, []).append(row)
profile = {}
for question in questions:
nodes = session_modules.get(question["id"], [])
if not question["use_location_question"]:
profile[question["id"]] = bool(nodes)
else:
profile[question["id"]] = [node.title for node in nodes]
return profile | PypiClean |
/NDBC-1.2.0.tar.gz/NDBC-1.2.0/changelog.md | # NDBC Package Change Log
---
### v1.1.1
- bug(fix) Correct issue with alpha characters in station ID string [#32](https://github.com/GenSci/NDBC/issues/32)
- bug(fix) Prevent continuous year looping when URLs fail.
### v1.1.0
- Added support for multiple data package retrieval through a more general `get_data()` function.
- Added deprecation flags to `get_stdmet()` function, which is being replaced by more general `get_data()`.
- Improved bad data flag detection and replacement with `np.NAN` values.
- Added property decorated getter functions for all identified data packages
### v1.0.1
- Modified month URL kwargs to better handle selecting months in the previous year. (fixes issue #20)
- Added `__repr__()` method so users can see the station ID when calling the object in the terminal
- Added demonstration Jupyter Notebook to make package usage clear.
| PypiClean |
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20/plugins/extra/wai-lang/lib/languages.js | define( [ 'aloha', 'aloha/jquery', 'flag-icons/flag-icons-plugin' ],
function( Aloha, jQuery, FlagIcons ) {
return new ( Aloha.AbstractRepository.extend( {
/**
* Set of language codes
*/
languageCodes: [],
_constructor: function() {
this._super( 'wai-languages' );
},
/**
* Initialize WAI Languages, load the language file and prepare the data.
*/
init: function() {
// Load the language codes
jQuery.ajax( {
url : Aloha.getPluginUrl( 'wai-lang' ) + '/lib/language-codes.json',
dataType : 'json',
success : jQuery.proxy( this.storeLanguageCodes, this ),
error : this.errorHandler
} );
this.repositoryName = 'WaiLanguages';
},
markObject: function( obj, item ) {
//copied from wai-lang-plugin makeVisible to avoid a circular dependency
// We do not need to add this class here since it already being
// done in the wai-lang plugin
// jQuery( obj ).addClass( 'aloha-wai-lang' );
},
/**
         * This method will be invoked if an error occurs while loading data via ajax
*/
errorHandler: function( text, error ) {
//TODO log error here
},
/**
* Stores the retrieved language code data in this object
*/
storeLanguageCodes: function( data ) {
var that = this;
// Transform loaded json into a set of repository documents
jQuery.each( data, function( key, value ) {
var el = value;
el.id = key;
el.repositoryId = that.repositoryId;
el.type = 'language';
el.url = FlagIcons.path + '/img/flags/' + el.id + '.png';
// el.renditions.url = "img/flags/" + e.id + ".png";
// el.renditions.kind.thumbnail = true;
that.languageCodes.push( new Aloha.RepositoryDocument( el ) );
} );
},
/**
         * Searches the repository for object items matching the query string and,
         * if given, the objectTypeFilter. If nothing matches, an empty result set is returned.
* Not supported: filter, orderBy, maxItems, skipcount, renditionFilter
*/
query: function( p, callback ) {
var query = new RegExp( '^' + p.queryString, 'i' ),
i,
d = [],
matchesName,
matchesType,
currentElement;
for ( i = 0; i < this.languageCodes.length; ++i ) {
currentElement = this.languageCodes[ i ];
matchesName = ( !p.queryString || currentElement.name.match( query ) || currentElement.nativeName.match( query ) );
matchesType = ( !p.objectTypeFilter || ( !p.objectTypeFilter.length ) || jQuery.inArray( currentElement.type, p.objectTypeFilter ) > -1 );
if ( matchesName && matchesType ) {
d.push( currentElement );
}
}
callback.call( this, d );
}
} ) )();
} ); | PypiClean |
/ETSProjectTools-0.6.0.tar.gz/ETSProjectTools-0.6.0/enthought/ets/graph.py | import os
import sys
# Project library imports
from enthought.ets.base_subcommand import BaseSubcommand
from pkg_resources import Requirement
class Graph(BaseSubcommand):
"""
The ets graph command.
"""
def __init__(self, subparsers):
"""
Constructor.
Overloaded to customize our parser configuration.
"""
# Create our parser.
        desc = ('Uses graphviz to save a dependency graph of the specified '
            'projects. At least one project must be specified. '
            'The project dependencies are found by consulting a project map. '
            'That map may be explicitly provided or generated by '
            'inspecting a set of repositories.')
parser = subparsers.add_parser('graph',
description = desc,
help = '. . . %s' % desc,
)
# Add arguments
parser.add_argument('project',
nargs = "+",
help = 'Specifications of project(s) to retrieve. These are of '
'the same form as setuptools\' version specifications. For '
'example "ets==2.7.0" or "ets >=2.7, <3.0a"',
)
# Add the options
parser.add_argument('-e', '--extra',
dest = 'extra',
default = False,
action = 'store_true',
help = '''Display the project[extra] requirements as separate
nodes.'''
)
self.use_project_map(parser)
parser.add_argument('-o', '--output',
dest = 'path',
help = '''Specify the file in which the graph will be saved.
Filetype is guessed from extension (defaults to png).
Possibilities are png, ps, svg, jpg, svgz, tga, plain, or jpeg'''
)
self.proxy(parser)
# Save the entry point for running this command.
parser.set_defaults(func = self.main)
return
def main(self, args, cfg):
"""
Execute the ets graph command.
"""
try:
from pygraphviz import AGraph
except ImportError:
print >>sys.stderr, ('Cannot import pygraphviz. '
'You need pygraphviz for this operation.')
return
# Build up the set of projects we're interested in.
project_set = self.build_project_set(args, cfg)
project_set.add_dependencies()
# Only continue if the user specified at least one project.
if len(args.project) == 0:
return
# Build the graph, storing the nodes with extras, to be able to clean
# up the graph later on.
graph = AGraph(strict=True, directed=True)
extra_nodes = set()
# FIXME: Should not be accessing a protected member of the ProjectSet
# class!
for pinfo in project_set._projects.itervalues():
project_id = "%s %s" % (pinfo['name'], pinfo['version'])
# Add all the extra nodes and connect them to the core package
for extra in pinfo.get('extras_require', []):
graph.add_edge( ('%s[%s]' % (project_id, extra), project_id) )
extra_nodes.add('%s[%s]' % (project_id, extra))
# Satisfy the requirements for each extra needed
for extra in pinfo['extras']:
extra_requires = pinfo['extras_require'][extra]
for req in extra_requires:
for dependency_id in self.requirement_labels(req,
project_set):
graph.add_edge(('%s[%s]' % (project_id, extra),
dependency_id)
)
# Satisfy the install requirements
for req in pinfo['install_requires']:
for dependency_id in self.requirement_labels(req, project_set):
graph.add_edge((project_id, dependency_id))
if not args.extra:
self.remove_nodes(graph, extra_nodes)
graph.layout(prog='dot')
# Write to the output file. If no filename was specified, use the name
# and version of the root project.
if args.path is None or args.path == '':
name, version = project_set.get_root_project_info()
args.path = '%s_%s' % (name, version)
name, extension = os.path.splitext(args.path)
        if extension.lstrip('.') not in ('png', 'ps', 'svg', 'jpg', 'svgz', 'tga',
            'plain', 'jpeg'):
args.path += '.png'
graph.draw(args.path)
print "Dependency graph saved to %s" % args.path
return
def remove_nodes(self, graph, nodes):
"""
Remove nodes from a pygraphviz graph, reconnecting the edges
without the nodes.
"""
for node in nodes:
in_neighbors = graph.in_neighbors(node)
out_neighbors = graph.out_neighbors(node)
graph.delete_node(node)
for in_neighbor in in_neighbors:
for out_neighbor in out_neighbors:
graph.add_edge(in_neighbor, out_neighbor)
return
def requirement_labels(self, requirement, project_set):
"""
        Return the list of node labels for a given requirement.
"""
requirement_info = Requirement.parse(requirement)
requirement_name = requirement_info.project_name
if requirement_name in project_set._projects:
dependency_info = project_set._projects[requirement_name]
dependency_id = "%s %s" % (
dependency_info['name'],
dependency_info['version'],
)
if len(requirement_info.extras)>0:
dependency_id = ["%s[%s]" % (dependency_id, extra)
for extra in requirement_info.extras]
return dependency_id
else:
# FIXME: Here we should mark this node as being
# different: the project is not hosted by us.
# but we need a recent version of pygraphviz.
#dependency_id = requirement_name
#graph.add_node(dependency_id, color='blue')
dependency_id = '%s -- External' % requirement_name
return (dependency_id, ) | PypiClean |
/Mopidy-WebLibrary-1.0.0.tar.gz/Mopidy-WebLibrary-1.0.0/mopidy_weblibrary/static/vendors/lastfm/lastfm.api.cache.js | Storage.prototype.setObject = function(key, value){
this.setItem(key, JSON.stringify(value));
}
/* Get an object from a Storage object. */
Storage.prototype.getObject = function(key){
var item = this.getItem(key);
return JSON.parse(item);
}
/* Creates a new cache object. */
function LastFMCache(){
/* Expiration times. */
var MINUTE = 60;
var HOUR = MINUTE * 60;
var DAY = HOUR * 24;
var WEEK = DAY * 7;
var MONTH = WEEK * 4.34812141;
var YEAR = MONTH * 12;
/* Methods with weekly expiration. */
var weeklyMethods = [
'artist.getSimilar',
'tag.getSimilar',
'track.getSimilar',
'artist.getTopAlbums',
'artist.getTopTracks',
'geo.getTopArtists',
'geo.getTopTracks',
'tag.getTopAlbums',
'tag.getTopArtists',
'tag.getTopTags',
'tag.getTopTracks',
'user.getTopAlbums',
'user.getTopArtists',
'user.getTopTags',
'user.getTopTracks'
];
/* Name for this cache. */
var name = 'lastfm';
/* Create cache if it doesn't exist yet. */
if(localStorage.getObject(name) == null){
localStorage.setObject(name, {});
}
/* Get expiration time for given parameters. */
this.getExpirationTime = function(params){
var method = params.method;
if((/Weekly/).test(method) && !(/List/).test(method)){
if(typeof(params.to) != 'undefined' && typeof(params.from) != 'undefined'){
return YEAR;
}
else{
return WEEK;
}
}
        for(var key in weeklyMethods){
            if(method == weeklyMethods[key]){
return WEEK;
}
}
return -1;
};
/* Check if this cache contains specific data. */
this.contains = function(hash){
return typeof(localStorage.getObject(name)[hash]) != 'undefined' &&
typeof(localStorage.getObject(name)[hash].data) != 'undefined';
};
/* Load data from this cache. */
this.load = function(hash){
return localStorage.getObject(name)[hash].data;
};
/* Remove data from this cache. */
this.remove = function(hash){
var object = localStorage.getObject(name);
object[hash] = undefined;
localStorage.setObject(name, object);
};
/* Store data in this cache with a given expiration time. */
this.store = function(hash, data, expiration){
var object = localStorage.getObject(name);
var time = Math.round(new Date().getTime() / 1000);
object[hash] = {
data : data,
expiration : time + expiration
};
localStorage.setObject(name, object);
};
/* Check if some specific data expired. */
this.isExpired = function(hash){
var object = localStorage.getObject(name);
var time = Math.round(new Date().getTime() / 1000);
if(time > object[hash].expiration){
return true;
}
return false;
};
/* Clear this cache. */
this.clear = function(){
localStorage.setObject(name, {});
};
    };
};