repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
indirectlylit/kolibri | packages/kolibri-tools/lib/i18n/fonts.py | 2 | 19713 | # -*- coding: utf-8 -*-
"""
For usage instructions, see:
https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html
"""
from __future__ import unicode_literals
import argparse
import base64
import io
import json
import logging
import mimetypes
import os
import re
import sys
import tempfile
import noto_source
import utils
from fontTools import merge
from fontTools import subset
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
logging.getLogger("fontTools").setLevel(logging.WARNING)
logging.StreamHandler(sys.stdout)
"""
Constants
"""
OUTPUT_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.pardir,
os.pardir,
os.pardir,
os.pardir,
"kolibri",
"core",
"static",
"assets",
"fonts",
)
)
# Sets the source date epoch to 1/1/21 to prevent temporary files from
# getting different headers on each run, leading to non-glyph-related changes to
# their base64 encoding
# ref: https://github.com/fonttools/fonttools/issues/1135
os.environ["SOURCE_DATE_EPOCH"] = "1609459200000"
FONT_TOOLS_OPTIONS = subset.Options()
FONT_TOOLS_OPTIONS.flavor = "woff" # most widely supported format
FONT_TOOLS_OPTIONS.ignore_missing_unicodes = True # important for subsetting
# basic latin glyphs
NOTO_SANS_LATIN = "NotoSans"
# font family name conventions
SCOPE_FULL = "noto-full"
SCOPE_SUBSET = "noto-subset"
SCOPE_COMMON = "noto-common"
"""
Shared helpers
"""
_FONT_FACE = """
@font-face {{
font-family: '{family}';
src: url('{url}') format('woff');
font-style: normal;
font-weight: {weight};
unicode-range: {unicodes};
font-display: swap;
}}
"""
def _gen_font_face(family, url, is_bold, unicodes):
weight = "bold" if is_bold else "normal"
return _FONT_FACE.format(family=family, url=url, weight=weight, unicodes=unicodes)
def _scoped(scope, name):
return "{}.{}".format(scope, name)
@utils.memoize
def _woff_font_path(name, is_bold):
file_name = "{name}.{weight}.woff".format(
name=name, weight="700" if is_bold else "400"
)
return os.path.join(OUTPUT_PATH, file_name)
def _load_font(path):
guess = mimetypes.guess_type(path)
if guess[0] not in [
"font/ttc",
"font/ttf",
"font/otf",
"font/woff",
"application/font-sfnt",
"application/font-woff",
]:
logging.error("Not a font file: {}".format(path))
logging.error("Guessed mimetype: '{}'".format(guess[0]))
logging.error("If this is a text file: do you have Git LFS installed?")
sys.exit(1)
try:
return subset.load_font(path, FONT_TOOLS_OPTIONS, dontLoadGlyphNames=True)
except FileNotFoundError as e: # noqa F821
logging.error("Could not load font: {}".format(str(e)))
logging.error("You may need to run: `make i18n-download-source-fonts`")
sys.exit(1)
@utils.memoize
def _font_priorities(default_font):
"""
Given a default font, return a list of all possible font names roughly in the order
that we ought to look for glyphs in. Many fonts contain overlapping sets of glyphs.
Without doing this: we risk loading a bunch of random font files just because they
happen to contain one of the glyphs, and we also risk loading the 'wrong' version
of the glyphs if they happen to differ.
"""
# start with the default
font_names = [default_font]
# look in the latin set next
if default_font != NOTO_SANS_LATIN:
font_names.append(NOTO_SANS_LATIN)
# then look at the rest of the supported languages' default fonts
for lang_info in utils.available_languages():
name = lang_info[utils.KEY_DEFAULT_FONT]
if name not in font_names:
font_names.append(name)
# finally look at the remaining languages
font_names.extend([fn for fn in noto_source.FONT_MANIFEST if fn not in font_names])
return font_names
@utils.memoize
def _font_glyphs(font_path):
"""
extract set of all glyphs from a font
"""
glyphs = set()
for table in _load_font(font_path)["cmap"].tables:
glyphs |= set(table.cmap.keys())
return glyphs
def _clean_up(scope):
"""
Delete all files in OUTPUT_PATH that match the scope
"""
css_pattern = r"{}.*?\.css".format(scope)
woff_pattern = r"{}.*?\.woff".format(scope)
for name in os.listdir(OUTPUT_PATH):
if re.match(css_pattern, name) or re.match(woff_pattern, name):
os.unlink(os.path.join(OUTPUT_PATH, name))
"""
CSS helpers
"""
CSS_HEADER = """
/*
* This is an auto-generated file, so any manual edits will be overridden.
*
* To regenerate, see instructions here:
* https://kolibri-dev.readthedocs.io/en/develop/references/i18n.html
*
* This file was generated by build_tools/i18n/fonts.py
*/
"""
def _list_to_ranges(input_list):
"""
Iterator of ranges of contiguous numbers from a list of integers.
Ranges returned are [x, y) – in other words, y is non-inclusive.
(from: http://code.activestate.com/recipes/496682/)
"""
new_list = list(input_list)
new_list.sort()
start = new_list[0]
currentrange = [start, start + 1]
for item in new_list[1:]:
if currentrange[1] == item:
currentrange[1] += 1 # contiguous
else:
yield tuple(currentrange) # new range start
currentrange = [item, item + 1]
yield tuple(currentrange) # last range
def _fmt_code(code):
return "{:x}".format(code).upper()
def _fmt_range(glyphs):
"""
Generates a font-face-compatible 'unicode range' attribute for a given set of glyphs
"""
fmt_ranges = []
for r in _list_to_ranges(sorted(glyphs)):
if r[0] == r[1] - 1:
fmt_ranges.append("U+{}".format(_fmt_code(r[0])))
else:
fmt_ranges.append("U+{}-{}".format(_fmt_code(r[0]), _fmt_code(r[1] - 1)))
return ",".join(fmt_ranges)
"""
Full Fonts
"""
def _full_font_face(font_family, font_name, is_bold, omit_glyphs=set()):
"""
generate the CSS reference for a single full font
"""
file_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)
file_name = os.path.basename(file_path)
glyphs = _font_glyphs(file_path) - omit_glyphs
if not glyphs:
return ""
return _gen_font_face(
font_family, file_name, is_bold=is_bold, unicodes=_fmt_range(glyphs)
)
def _gen_full_css_modern(lang_info):
"""
Generates listing for all full fonts, segmented by unicode ranges and weights
"""
# skip previously accounted for glyphs so there is no overlap between font-faces
previous_glyphs = set()
# all available fonts
font_faces = []
for font_name in _font_priorities(lang_info[utils.KEY_DEFAULT_FONT]):
font_faces.append(
_full_font_face(
SCOPE_FULL, font_name, is_bold=False, omit_glyphs=previous_glyphs
)
)
font_faces.append(
_full_font_face(
SCOPE_FULL, font_name, is_bold=True, omit_glyphs=previous_glyphs
)
)
# Assumes all four variants have the same glyphs, from the content Regular font
previous_glyphs |= _font_glyphs(
_woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)
)
output_name = os.path.join(
OUTPUT_PATH,
"{}.modern.css".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),
)
logging.info("Writing {}".format(output_name))
with open(output_name, "w") as f:
f.write(CSS_HEADER)
f.write("".join(font_faces))
def _gen_full_css_basic(lang_info):
output_name = os.path.join(
OUTPUT_PATH,
"{}.basic.css".format(_scoped(SCOPE_FULL, lang_info[utils.KEY_INTL_CODE])),
)
logging.info("Writing {}".format(output_name))
with open(output_name, "w") as f:
f.write(CSS_HEADER)
default_font = lang_info[utils.KEY_DEFAULT_FONT]
f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=False))
f.write(_full_font_face(SCOPE_FULL, default_font, is_bold=True))
def _write_full_font(font_name, is_bold):
font = _load_font(noto_source.get_path(font_name, is_bold=is_bold))
output_name = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=is_bold)
logging.info("Writing {}".format(output_name))
font.save(output_name)
def command_gen_full_fonts():
logging.info("generating full fonts...")
_clean_up(SCOPE_FULL)
for font_name in noto_source.FONT_MANIFEST:
_write_full_font(font_name, is_bold=False)
_write_full_font(font_name, is_bold=True)
languages = utils.available_languages(include_in_context=True, include_english=True)
for lang_info in languages:
_gen_full_css_modern(lang_info)
_gen_full_css_basic(lang_info)
logging.info("finished generating full fonts")
"""
Subset fonts
"""
def _chunks(string, n=72):
"""
Yield successive n-sized chunks from string
"""
for i in range(0, len(string), n):
yield string[i : i + n]
def _write_inline_font(file_object, font_path, font_family, is_bold):
"""
Inlines a font as base64 encoding within a CSS file
"""
with io.open(font_path, mode="rb") as f:
data = f.read()
data_uri = "data:application/x-font-woff;charset=utf-8;base64,\\\n{}".format(
"\\\n".join(_chunks(base64.b64encode(data).decode()))
)
glyphs = _font_glyphs(font_path)
if not glyphs:
return
file_object.write(
_gen_font_face(
family=font_family,
url=data_uri,
is_bold=is_bold,
unicodes=_fmt_range(glyphs),
)
)
def _generate_inline_font_css(name, font_family):
"""
Generate CSS and clean up inlined woff files
"""
font_path_reg = _woff_font_path(name, is_bold=False)
font_path_bold = _woff_font_path(name, is_bold=True)
output_name = os.path.join(OUTPUT_PATH, "{}.css".format(name))
logging.info("Writing {}".format(output_name))
with open(output_name, "w") as f:
f.write(CSS_HEADER)
_write_inline_font(f, font_path_reg, font_family, is_bold=False)
_write_inline_font(f, font_path_bold, font_family, is_bold=True)
os.unlink(font_path_reg)
os.unlink(font_path_bold)
def _get_subset_font(source_file_path, text):
"""
Given a source file and some text, returns a new, in-memory fontTools Font object
that has only the glyphs specified in the set.
Note that passing actual text instead of a glyph set to the subsetter allows it to
generate appropriate ligatures and other features important for correct rendering.
"""
if not os.path.exists(source_file_path):
logging.error("'{}' not found".format(source_file_path))
font = _load_font(source_file_path)
subsetter = subset.Subsetter(options=FONT_TOOLS_OPTIONS)
subsetter.populate(text=text)
subsetter.subset(font)
return font
def _get_lang_strings(locale_dir):
"""
Text used in a particular language
"""
strings = []
for file_name in os.listdir(locale_dir):
if not file_name.endswith(".json"):
continue
file_path = os.path.join(locale_dir, file_name)
with io.open(file_path, mode="r", encoding="utf-8") as f:
lang_strings = json.load(f).values()
for s in lang_strings:
s = re.sub(r"\W", " ", s) # clean whitespace
strings.append(s)
strings.append(s.upper())
return strings
@utils.memoize
def _get_common_strings():
"""
Text useful for all languages: displaying the language switcher, Kolibri version
numbers, symbols, and other un-translated text
"""
# Special characters that are used directly in untranslated template strings.
# Search the codebase with this regex to find new ones: [^\x00-\x7F©–—…‘’“”•→›]
strings = [
chr(0x0), # null
"©",
"–", # en dash
"—", # em dash
"…",
"‘",
"’",
"“",
"”",
"•",
"●",
"→",
"›",
]
# all the basic printable ascii characters
strings.extend([chr(c) for c in range(32, 127)])
# text from language names, both lower- and upper-case
languages = utils.available_languages(include_in_context=True, include_english=True)
for lang in languages:
strings.append(lang[utils.KEY_LANG_NAME])
strings.append(lang[utils.KEY_LANG_NAME].upper())
strings.append(lang[utils.KEY_ENG_NAME])
strings.append(lang[utils.KEY_ENG_NAME].upper())
return strings
def _merge_fonts(fonts, output_file_path):
"""
Given a list of fontTools font objects, merge them and export to output_file_path.
Implementation note: it would have been nice to pass the fonts directly to the
merger, but the current fontTools implementation of Merger takes a list of file names
"""
tmp = tempfile.gettempdir()
f_names = []
for i, f in enumerate(fonts):
tmp_font_path = os.path.join(tmp, "{}.woff".format(i))
f_names.append(tmp_font_path)
f.save(tmp_font_path)
merger = merge.Merger(options=FONT_TOOLS_OPTIONS)
merged_font = merger.merge(f_names)
merged_font.save(output_file_path)
logging.info("created {}".format(output_file_path))
def _cannot_merge(font):
# all fonts must have equal units per em for merging, and 1000 is most common
return font["head"].unitsPerEm != 1000
def _subset_and_merge_fonts(text, default_font, subset_reg_path, subset_bold_path):
"""
Given text, generate both a bold and a regular font that can render it.
"""
reg_subsets = []
bold_subsets = []
skipped = []
# track which glyphs are left
remaining_glyphs = set([ord(c) for c in text])
for font_name in _font_priorities(default_font):
full_reg_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=False)
full_bold_path = _woff_font_path(_scoped(SCOPE_FULL, font_name), is_bold=True)
reg_subset = _get_subset_font(full_reg_path, text)
bold_subset = _get_subset_font(full_bold_path, text)
if _cannot_merge(reg_subset) or _cannot_merge(bold_subset):
skipped.append(font_name)
continue
reg_subsets.append(reg_subset)
bold_subsets.append(bold_subset)
remaining_glyphs -= _font_glyphs(full_reg_path)
if not remaining_glyphs:
break
_merge_fonts(reg_subsets, os.path.join(OUTPUT_PATH, subset_reg_path))
_merge_fonts(bold_subsets, os.path.join(OUTPUT_PATH, subset_bold_path))
def command_gen_subset_fonts():
"""
Creates custom fonts that attempt to contain all the glyphs and other font features
that are used in user-facing text for the translation in each language.
We make a separate subset font for common strings, which generally overlaps somewhat
with the individual language subsets. This slightly increases how much the client
needs to download on first request, but reduces Kolibri's distribution size by a
couple megabytes.
"""
logging.info("generating subset fonts...")
_clean_up(SCOPE_COMMON)
_clean_up(SCOPE_SUBSET)
_subset_and_merge_fonts(
text=" ".join(_get_common_strings()),
default_font=NOTO_SANS_LATIN,
subset_reg_path=_woff_font_path(SCOPE_COMMON, is_bold=False),
subset_bold_path=_woff_font_path(SCOPE_COMMON, is_bold=True),
)
languages = utils.available_languages(include_in_context=True, include_english=True)
for lang_info in languages:
logging.info("gen subset for {}".format(lang_info[utils.KEY_ENG_NAME]))
strings = []
strings.extend(_get_lang_strings(utils.local_locale_path(lang_info)))
strings.extend(_get_lang_strings(utils.local_perseus_locale_path(lang_info)))
name = lang_info[utils.KEY_INTL_CODE]
_subset_and_merge_fonts(
text=" ".join(strings),
default_font=lang_info[utils.KEY_DEFAULT_FONT],
subset_reg_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=False),
subset_bold_path=_woff_font_path(_scoped(SCOPE_SUBSET, name), is_bold=True),
)
# generate common subset file
_generate_inline_font_css(name=SCOPE_COMMON, font_family=SCOPE_COMMON)
# generate language-specific subset font files
languages = utils.available_languages(include_in_context=True, include_english=True)
for lang in languages:
_generate_inline_font_css(
name=_scoped(SCOPE_SUBSET, lang[utils.KEY_INTL_CODE]),
font_family=SCOPE_SUBSET,
)
logging.info("subsets created")
"""
Add source fonts
"""
def command_update_font_manifest(ref):
noto_source.update_manifest(ref)
def command_download_source_fonts():
noto_source.fetch_fonts()
"""
Main
"""
def main():
"""
Generates files to support both 'basic' and a 'modern' browsers.
Both browsers get the common and language-specific application subset fonts inline
to load quickly and prevent a flash of unstyled text, at least for all application
text. Full font files are linked and will load asynchronously.
# Modern behavior
Newer browsers have full support for the unicode-range attribute of font-face
definitions, which allow the browser to download fonts as-needed based on the text
observed. This allows us to make _all_ font alphabets available, and ensures that
content will be rendered using the best font possible for all content, regardless
of selected app language.
# Basic behavior
Older browsers do not fully support the unicode-range attribute, and will eagerly
download all referenced fonts regardless of whether or not they are needed. This
would have an unacceptable performance impact. As an alternative, we provide
references to the full fonts for the user's currently-selected language, under the
assumption that most of the content they use will be in that language.
Content viewed in other languages using the basic variant should still usually
display, albeit using system fonts.
"""
description = "\n\nProcess fonts.\nSyntax: [command] [branch]\n\n"
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="command")
subparsers.add_parser(
"update-font-manifest",
help="Update manifest from https://github.com/googlei18n/noto-fonts/",
).add_argument(
"--ref",
help="Github reference, e.g. commit or tag. Defaults to head of master.",
type=str,
)
subparsers.add_parser(
"download-source-fonts",
help="Download sources from https://github.com/googlei18n/noto-fonts/",
)
subparsers.add_parser(
"generate-subset-fonts", help="Generate subset fonts based on app text"
)
subparsers.add_parser("generate-full-fonts", help="Generate full fonts")
args = parser.parse_args()
if args.command == "update-font-manifest":
command_update_font_manifest(args.ref)
elif args.command == "download-source-fonts":
command_download_source_fonts()
elif args.command == "generate-subset-fonts":
command_gen_subset_fonts()
elif args.command == "generate-full-fonts":
command_gen_full_fonts()
else:
logging.warning("Unknown command\n")
parser.print_help(sys.stderr)
sys.exit(0)
if __name__ == "__main__":
main()
| mit | -384,668,701,977,580,200 | 29.397218 | 89 | 0.644989 | false | 3.494492 | false | false | false |
silicon-mountain/github-users-africa | step3_extend_users.py | 4 | 7319 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import os
import json
import time
from dateutil import parser as du_parser
from datetime import datetime
import StringIO
import logging
import requests
from requests.auth import HTTPBasicAuth
import html5lib
from html5lib import treebuilders
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
last_id = None
one_minute = 60
one_hour = one_minute * 60
min_remaining_tostop = 30
reqs = 0
# reqs_limit = None
# reqs_remaining = None
headers = {}
TOKEN_AUTH = HTTPBasicAuth(GITHUB_TOKEN, "x-oauth-basic")
def check_limits(headers):
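# GitHub reports API quota in the X-RateLimit-Limit / X-RateLimit-Remaining response
# headers; pause for an hour once we get close to the remaining-request floor.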
reqs_limit = int(headers.get('X-RateLimit-Limit', 0))
reqs_remaining = int(headers.get('X-RateLimit-Remaining', 0))
if reqs_remaining <= min_remaining_tostop:
logger.info("Reached %d requests over %d. Pausing one hour."
% (reqs_limit - reqs_remaining, reqs_limit))
pause(one_hour)
def pause(duration):
''' basic sleep with periodic logging (to show progess) '''
interval = 10
tick = duration / interval
for i in xrange(interval):
logger.info(u"Pause (%dmn) Elapsed: %dmn" % (duration / one_minute,
tick * i / one_minute))
time.sleep(tick)
existing_users = json.load(open('step2.json'))
try:
all_users = json.load(open('step3.json'))
except:
all_users = []
def getElementsByClassName(root, tag, className):
return [e for e in root.getElementsByTagName(tag)
if className in e.getAttribute('class')]
def extend_user(user):
print(user.get('username'))
def get_activity_from_html(username):
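# Scrape the public GitHub profile page for the contribution counters. This relies on
# the 'contrib-column' markup GitHub used at the time and may break if the page layout changes.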
r = requests.get('https://github.com/%s' % username,
headers=headers, auth=TOKEN_AUTH)
if r.status_code == 404:
return None
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
dom = parser.parse(StringIO.StringIO(r.content))
divs = dom.getElementsByTagName('div')
contrib_columns = [d for d in divs
if 'contrib-column' in
d.getAttribute('class')]
if not len(contrib_columns):
return {'contrib_total_num': 0,
'contrib_total_start': None,
'contrib_total_end': None,
'contrib_long_num': 0,
'contrib_long_start': None,
'contrib_long_end': None}
total_str = getElementsByClassName(
contrib_columns[0], "span",
"contrib-number")[0].firstChild.nodeValue
# logger.debug("total_str: {}".format(total_str))
total_dates_dom = getElementsByClassName(
contrib_columns[0], "span", "text-muted")[1]
total_dates = "".join([n.nodeValue
for n in total_dates_dom.childNodes])
# logger.debug("total_dates: {}".format(total_dates))
total_start = du_parser.parse(total_dates.split(u'–')[0])
total_end = du_parser.parse(total_dates.split(u'–')[1])
# logger.debug("total_start: {}".format(total_start))
# logger.debug("total_end: {}".format(total_end))
long_str = getElementsByClassName(
contrib_columns[1], "span",
"contrib-number")[0].firstChild.nodeValue
# logger.debug("long_str: {}".format(long_str))
long_dates_dom = getElementsByClassName(
contrib_columns[1], "span", "text-muted")[1]
long_dates = "".join([n.nodeValue
for n in long_dates_dom.childNodes])
# logger.debug("total_dates: {}".format(total_dates))
# logger.debug("long_dates: {}".format(long_dates))
if long_dates == "No recent contributions":
long_start = None
long_end = None
else:
long_start = du_parser.parse(long_dates.split(u'–')[0].strip())
if long_start.year > total_end.year:
long_start = datetime(long_start.year - 1,
long_start.month, long_start.day)
long_end = du_parser.parse(long_dates.split(u'–')[1].strip())
if long_end.year > total_end.year:
long_end = datetime(long_end.year - 1, long_end.month,
long_end.day)
return {
'contrib_total_num': int(total_str.split()[0].replace(',', '')),
'contrib_total_start': total_start.isoformat(),
'contrib_total_end': total_end.isoformat(),
'contrib_long_num': int(long_str.split()[0].replace(',', '')),
'contrib_long_start':
long_start.isoformat() if long_start is not None else None,
'contrib_long_end':
long_end.isoformat() if long_end is not None else None}
def get_profile(user):
r = requests.get(
'https://api.github.com/users/%s' % user.get('username'),
headers=headers, auth=TOKEN_AUTH)
check_limits(r.headers)
nd = {}
data = json.loads(r.content)
for col in data.keys():
if 'url' in col and not col == 'avatar_url':
continue
if col in user.keys():
continue
nd.update({col: data[col]})
return nd
def get_orgs(username):
orgs = {}
r = requests.get('https://api.github.com/users/%s/orgs' % username,
headers=headers, auth=TOKEN_AUTH)
check_limits(r.headers)
data = json.loads(r.content)
orgs.update({'orgs_num': len(data)})
for i, org in enumerate(data):
org_name = org.get('login')
prefix = 'org%d_' % i
rorg = requests.get('https://api.github.com/orgs/%s' % org_name,
headers=headers, auth=TOKEN_AUTH)
check_limits(rorg.headers)
data_org = json.loads(rorg.content)
nd = {}
for col in data_org.keys():
if 'url' in col and not col == 'avatar_url':
continue
nd.update({prefix + col: data_org[col]})
orgs.update(nd)
return orgs
try:
activity = get_activity_from_html(user.get('username'))
except Exception as e:
logger.exception(e)
raise
activity = {}
from pprint import pprint as pp ; pp(activity)
if activity is None:
return None
profile = get_profile(user)
orgs = get_orgs(user.get('username'))
user.update(activity)
user.update(profile)
user.update(orgs)
return user
# extend_user({'username': 'tensystems'})
# raise
all_usernames = [u['username'] for u in all_users]
for user in existing_users:
if user['username'] in all_usernames:
continue
user_update = extend_user(user)
if user_update is None:
continue
all_users.append(user_update)
json.dump(all_users, open('step3.json', 'w'), indent=4)
json.dump(all_users, open('step3.json', 'w'), indent=4)
| apache-2.0 | -6,548,492,864,477,135,000 | 31.932432 | 77 | 0.565586 | false | 3.7705 | false | false | false |
cfoale/ILOCI | Receiver-OLED/pi/projects/OLEDPython/serial_graph.py | 1 | 15603 | #IFIc Version 2.4 corresponding to IFIb Version 2.3 3/8/16 - cmf
#changed oofthreshold = 16, and scalefactor = 31.
#log now collects the IFIb float value scaled by scalefactor
#IFIc Version 2.3 corresponding to IFIb Version 2.3 3/1/16 - cmf
#changed oofthreshold = 16, and scalefactor = 32
import serial
from serial import SerialException #to be able to use Except SerialException:
from subprocess import Popen, PIPE
from os import path
from time import sleep
from datetime import datetime
from datetime import timedelta
##############################################################################
# Imports the necessary software for graphing
import gaugette.ssd1306
import sys
from math import sin #imported sin for Foale function
#Imports necessary setup for GPIO input/output
import RPi.GPIO as GPIO
start_time = datetime.now()
def millis():
dt = datetime.now()-start_time
ms = (dt.days * 24 *60 *60 +dt.seconds)*1000 + dt.microseconds/1000.0
return ms
#declare rfcomm functions to manage the system rf comm device
def rfinit():
#see if channel 22 has been added
p=Popen('sdptool browse local', shell=True, stdout=PIPE)
result = p.communicate()[0] #check to see if 'Serial Port' is present
position = result.find('Channel: 22')
if position > -1:
print("Serial Port is already present")
else:
#this initializes bluetooth to be discoverable and adds serial channel 22
Popen('sudo hciconfig hci0 piscan',shell=True)
Popen('sudo sdptool add --channel=22 SP',shell=True)
print("Serial Port channel 22 was added")
return
def rfhangup():
#this releases the comm port, if it exists, and returns
Popen('sudo rfcomm release /dev/rfcomm0',shell=True,stdout=PIPE)
return
def rflisten():
#this opens a comm port on rfcomm0 channel 22, which is left running,
p = Popen('sudo rfcomm listen /dev/rfcomm0 22 &',shell=True,stdout=PIPE)
return
def rfshow():
#this checks to see if a connection on rfcomm0 has been made
#it returns a bool, True or False
p = Popen('rfcomm show /dev/rfcomm0',shell=True,stdout=PIPE,stderr=PIPE)
result = p.communicate()[0] #check the 1st tuple for the string returned
position= result.find('connect') #does it contain connect?
bool_connected = False
if position > -1:
bool_connected = True
return bool_connected
def rfcommListen(timeout):
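# Wait up to 'timeout' seconds for a Bluetooth serial connection on rfcomm0 channel 22,
# then perform the 'Ready for data' / 'data follows' handshake. Returns a
# (success, serial_port) tuple on success, or False on timeout/failure.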
start_count = 0 #counter to see if we have waited too long
goodQ = False # we return the value of this, True means we got connected
#first hangup any connection
rfhangup()
#give the system and the remote a chance to do stuff
#print('rfcommlisten: sleeping 60 sec after hangup')
sleep(2)
#open the port on channel 22 and wait
rflisten()
#put some text on the display as to what we are doing
led.clear_display()
text = 'Waiting for connect..'
led.draw_text2(0,0,text,1)
text2 = 'Press Pb1+Pb2 to Exit'
led.draw_text2(0,16,text2,1)
led.display()
#print('finished rflisten')
#now wait for a connection
while True:
start_count = start_count +1
#print('while loop begun')
if start_count > timeout:
print('Listen for connection timed out. Hanging up')
rfhangup()
return False
sleep(1) #wait a second, so we dont use up too much cpu
if GPIO.input(Pb1) == False: #see if button pushed to Exit
if GPIO.input(Pb2) == False:
print('Pb1 & Pb2 pressed!')
led.clear_display()
text = 'Pb1 & Pb2 pressed!'
led.draw_text2(0,0,text,1)
text2 = 'Powercycle to restart'
led.draw_text2(0,16,text2,1)
led.display()
exit()
#print('starting rfshow')
if rfshow():
print('We are connected')
break #we are connected
#see if the /dev/rfcomm0 path exists to prove we are connected
#print('past rfshow')
bool_path_exists = path.exists('/dev/rfcomm0')
print('rfcomm0 path is '+str(bool_path_exists))
if bool_path_exists :
ser = serial.Serial('/dev/rfcomm0', 9600)
else :
print('rfcomm0 was not created as expected')
rfhangup()
return False
#rfcomm exists so open the serial port
ser.open()
#send an acknowledgement
ser.write('Ready for data\n')
#read the response - it will wait forever
singleread = ser.readline()
if singleread.find('data follows') > -1 :
print(singleread)
goodQ = True
nbytes = ser.inWaiting()
#print("inWaiting "+ str(nbytes))
return (goodQ, ser)
def command(bOn): # function for sending a command back to data-recorder Pi upon button push - ICF
print('Send Command1') # Temporary command response for testing - ICF
ser.write('CMD1\n') #an acknowledgement is always terminated with \n
#log the event
timems = millis()
logfile.write(str(timems) + ' ' + '9999' + '\n')
#put some text on the display as to what we are doing
led.clear_display()
text = 'Saving recent history..'
led.draw_text2(0,0,text,1)
text2 = 'Push button 2 for IFI incorporation'
led.draw_text2(0,16,text2,1)
led.display()
#reconfigure the LED
if bOn: #this means the LED should be on already
GPIO.output(LED_out, False) #bOn ==True means we were out of family, so turn off the LED
return
GPIO.output(LED_out, True) #bOn==False means we are in family, just want to send a command
sleep(0.2)
GPIO.output(LED_out, False)
return
def command2(bOn): # function for sending a command back to data-recorder Pi upon button push (new command will be update command) - ICF-cmf
print('Send Command3') # tells IFIb to pause sending, and only continues when it receives another pause command
ser.write('CMD3\n') #an acknowledgement is always terminated with \n
#put some text on the display as to what we are doing
led.clear_display()
text = 'IFI Paused..'
led.draw_text2(0,0,text,1)
text2 = 'Press Pb1 IFI Update'
led.draw_text2(0,16,text2,1)
led.display()
sleep(1)
while True: #wait for either pb1 or pb2
sleep(.1)
try:
nbytes = ser.inWaiting()
if nbytes > 0:
singleread = ser.readline()
#now see what we got..it should always be a float number of one sort or another
fnum = float(singleread)
print('command2 got ' + str(singleread))
ser.write('\n') #an acknowledgement of any bytes received must be made
if fnum == 8881.0:
print('CMD3 pause terminated by IFI ')
timems = millis()
logfile.write(str(timems) + ' ' + '8881' + '\n')
return
if GPIO.input(Pb1) == False: # this is confirmation we want IFI Update
print('Send Command3') # tells IFIb to unpause sending, and only continues when it receives another pause command
ser.write('CMD3\n') #an acknowledgement is always terminated with \n
text2 = 'Pb1 pressed... '
led.draw_text2(0,16,text2,1)
led.display()
sleep(2) #time to release pb1 or else the shutdown routine will execute
break
if GPIO.input(Pb2) == False:
print('Send Command3') # tells IFIb to unpause sending, and only continues when it receives another pause command
ser.write('CMD3\n') #an acknowledgement is always terminated with \n
return
except IOError:
print("IO error in command2()")
return
#first see if we are trying to do a regular shutdown using pb1 as well
if GPIO.input(Pb1) == False:
logfile.close()
led.clear_display()
text = 'Both pbs pressed..'
led.draw_text2(0,0,text,1)
text2 = 'IFI Closed'
led.draw_text2(0,16,text2,1)
led.display()
exit()
print('Send Command2') # Temporary command response for testing - ICF
ser.write('CMD2\n') #an acknowledgement is always terminated with \n
#log the event
timems = millis()
logfile.write(str(timems) + ' ' + '8888' + '\n')
#put some text on the display as to what we are doing
led.clear_display()
text = 'Updating IFI... '
led.draw_text2(0,0,text,1)
text2 = 'Please wait > 2 mins'
led.draw_text2(0,16,text2,1)
led.display()
#reconfigure the LED
if bOn: #this means the LED should be on already
GPIO.output(LED_out, False) #bOn ==True means we were out of family, so turn off the LED
GPIO.output(LED_out, True) #bOn==False means we are in family, just want to send a command
sleep(0.2)
GPIO.output(LED_out, False)
#now wait for a response. It is possible another number will be received, that was sent by IFI
#before it received the CMD2 command, so we need to handle that
sleep(2) #should be enough for any present to be received
print("Command2 done waiting for bytes")
while True:
sleep(0.1)
try:
nbytes = ser.inWaiting()
if nbytes > 0:
singleread = ser.readline()
ser.write('\n') #an acknowledgement of any bytes received must be made
#now see what we got..it should always be a float number of one sort or another
fnum = float(singleread)
print('command2 got ' + str(singleread))
if fnum == 8888.0:
print('CMD2 is complete')
timems = millis()
logfile.write(str(timems) + ' ' + '8888' + '\n')
break
except IOError:
print("IO error in command2()")
return
return
### function added to draw vertical lines by CMF
def line(i,j,k,bLed): #draw a line from (i,j) to (i+1,k)
m = (j+k)/2 #this is the mid point in height between the start y and end y
if k > j: #going up
for l in range(j,m+1): #starting at j, above i, go up to m
led.draw_pixel(i,l, bLed) #draw the pixels up half way
for l in range(m+1,k+1):
led.draw_pixel(i+1,l, bLed) #draw the remaining pixes on step over in i
return
if k < j: #going down
for l in range(j, m, -1): #starting at j, decrease down to m
led.draw_pixel(i,l, bLed)
for l in range(m, k-1, -1): #starting half way down, decrease down to k, one i over
led.draw_pixel(i+1,l, bLed)
return
if j==k: #just draw pixels horizontally next to each other
led.draw_pixel(i,j, bLed)
led.draw_pixel(i+1, k, bLed)
return
def testline(): #test the line function
led.clear_display()
lh = 21
history = range(0,31,3)+range(31,-1,-3)
for i in range(0,lh):
line(i,history[i],history[i+1], True)
sleep(1)
for i in range(0,lh):
line(i,history[i],history[i+1], False)
#check the OLED has no lines :-) - CMF
led.clear_display()
return
########################END OF FUNCTION DEFINITIONS##################################
print('start')
# Sets up our pins and creates variables for the size of the display. If using other size display you can easily change them.
#get the next log file number
lognumberfile = open('/home/pi/projects/OLEDPython/lognumber.txt','r')
lognumberstr = lognumberfile.readline()
#increment the next log file number
nextlognumber = int(lognumberstr) +1
#write the next lognumber to the file
lognumberfile.close()
lognumberfile = open('/home/pi/projects/OLEDPython/lognumber.txt','w')
lognumberfile.write(str(nextlognumber)+'\n')
lognumberfile.close()
#setup the logfile name
logfilename = '/home/pi/projects/OLEDPython/ifilog' + str(int(lognumberstr)) + '.txt'
print('Using log file ' + logfilename)
logfile = open(logfilename,'w')
RESET_PIN = 15
DC_PIN = 16
width = 128
height = 32
led = gaugette.ssd1306.SSD1306(reset_pin=RESET_PIN, dc_pin=DC_PIN)
led.begin()
led.clear_display()
GPIO.setmode(GPIO.BCM)
Pb1 = 24 #CMF switched pb's 2/11/16 #GPIO pin 23 is the input from button 1 for restarting program upon disconnect, or sending 'save data for IFI' command back to data recorder - ICF
Pb2 = 23 #CMF switched pb's #GPIO pin 24 is input from button 2 for reboot command to reboot data recorder - ICF
LED_out = 18
GPIO.setup(Pb2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(Pb1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LED_out, GPIO.OUT) #GPIO pin 18 is the output to the LED
#RUN = True #Variable that keeps program running until final exception
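# Draw one full period of a sine wave across the 128x32 OLED as a start-up test pattern.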
for x in range(width):
#print(x)
fx = float(x)
y = (16 * sin((fx/128)*6.28) +16)
iy = int(y)
#print(iy)
led.draw_pixel(x,iy, True)
led.display()
print('Display initialized')
#initialize LED on count down timer to be 0, so the light will not be on
ledoncountdowntimer = 0;
#initialize the out of family threshold compared to max scale of 32
oofthreshold = 16 # 3/1/16 -cmf v 2.3
GPIO.output(LED_out, False) #make sure the LED is off
while True:
rfinit()
#Listen for connection
bool_result, ser = rfcommListen(3600)
if bool_result == False:
print('nobody connected before timeout')
print('releasing rfcomm0')
rfhangup()
exit()
led.clear_display()
timeout = 300 #roughly 25s delay
startcount = 0
history = [] #array for datapoints to go on graph, for 128 pixels wide
scalefactor = 31.0 #multiplies received normalized datapoints for display - height is 31
for i in range(width):
history.append(31) #fills the history array with max values, which is a line at the bottom of the display
#initialization is done, now get data
while True:
sleep(0.1)
startcount = startcount + 1
if startcount > timeout:
ser.close()
print('Timeout exceeded - closing')
break
led.display()
try:
nbytes = ser.inWaiting()
if nbytes > 0:
singleread = ser.readline()
print(singleread)
timems = millis()
fnum = scalefactor * float(singleread)
logfile.write(str(timems) + ' ' + str(fnum) + '\n') #Version 2.4 - 3/8/16 -cmf
inum =31 - int(fnum)
if inum > 31 :
inum = 31
if inum < 0:
inum = 0
oof = (31 - inum) # Out Of Family scaled 0 - 31
print(str(fnum)+' ' + str(oof)) #our debug output
for i in range(width-1):
line(i,history[i],history[i+1],False) #undraw the old pixels
history.pop(0) #remove the oldest value
history.append(inum) #add the new value to history
for i in range(width-1):
#led.draw_pixel(i,history[i], True) #draw the new value as a pixel
line(i,history[i],history[i+1],True)
# This is the sending acknowledgement
bOn = (oof > oofthreshold) # this is our Out of Family threshhold
if oof > oofthreshold:
GPIO.output(LED_out, True) #we are Out of Family - turn the light on
ledoncountdowntimer = 120; #for the time the data is on the display we will stay on before resetting, to attract attention to display
if oof < oofthreshold and ledoncountdowntimer > 0: #see if we should reset the LED
ledoncountdowntimer = ledoncountdowntimer-1;
if ledoncountdowntimer == 0: #the data has scrolled off the display, so reset
GPIO.output(LED_out, False)
if GPIO.input(Pb1) == False:
command(bOn)
#wait for a response from sender the command is complete - TBD
sleep(1) #this has to be very short so that sender does not wait too long for a response, then hangsup
led.clear_display() #prepare the display for normal graph
if GPIO.input(Pb2) == False:
command2(bOn)
print("command2 done. Back in the main loop..")
#wait for a response from sender the command is complete - TBD
sleep(1) #this has to be very short so that sender does not wait too long for a response, then hangsup
led.clear_display() #prepare the display for normal graph
else: #we either send a \n on its own, or preceded as a command (see above)
ser.write('\n')
startcount = 0
except IOError:
print('connection was dropped')
#close the log file
logfile.close()
#put some text on the display as to what we are doing
led.clear_display()
text = 'Connection was dropped'
led.draw_text2(0,0,text,1)
text2 = 'Push Pb1 to restart'
led.draw_text2(0,16,text2,1)
led.display()
while True:
if GPIO.input(Pb1) == False: #see if button pushed to reboot
exit()
#Popen('sudo reboot', shell=True)
ser.close()
break
print('releasing rfcomm0')
rfhangup()
print(history)
| gpl-3.0 | -3,365,956,879,989,339,000 | 33.596452 | 192 | 0.68192 | false | 3.02091 | false | false | false |
marco-lancini/Showcase | app_users/forms.py | 1 | 2023 | from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from app_users.models import UserProfile, Employment
from app_users.models_nn import CreativeFields
from app_collaborations.options import CREATIVE_FIELDS
#=========================================================================
# USER
#=========================================================================
class UserProfileForm(ModelForm):
"""
Manage the basic informations of a user profile
.. seealso:: :class:`app_users.models.UserProfile`
"""
class Meta:
model = UserProfile
exclude = ('user', 'employment')
class UserAuthForm(ModelForm):
"""
Manage the account informations of a user profile
"""
class Meta:
model = User
fields = ('email',)
#=========================================================================
# EMPLOYMENT
#=========================================================================
class EmploymentForm(ModelForm):
"""
Manage the employment data of a user
.. seealso:: :class:`app_users.models.Employment`
"""
class Meta:
model = Employment
#=========================================================================
# CREATIVE FIELDS
#=========================================================================
class CreativeFieldsAddForm(forms.Form):
"""
Manage the creative fields of a user
.. seealso:: :class:`app_users.models_nn.CreativeFields`
"""
class Meta:
model = CreativeFields
exclude = ('userprofile',)
def __init__(self, *args, **kwargs):
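# Restrict the creative-field choices to those the given user has not already added.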
user_filter = kwargs.pop('user_filter')
super(CreativeFieldsAddForm, self).__init__(*args, **kwargs)
fields = CreativeFields.objects.filter(userprofile=user_filter)
already_present = [ x.creative_field for x in fields ]
actual_choices = [ (k, v) for k, v in CREATIVE_FIELDS if k not in already_present ]
actual_choices = tuple(tuple(x) for x in actual_choices)
self.fields.insert(len(self.fields)-1, 'creative_field', forms.ChoiceField(choices=actual_choices))
| mit | 4,450,801,757,472,050,700 | 28.75 | 101 | 0.562037 | false | 4.154004 | false | false | false |
thortex/rpi3-webiopi | webiopi_0.7.1/python/webiopi/devices/sensor/htu21d.py | 1 | 4904 | # Copyright 2014 Zoltán Zörgő <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Sensor datasheet: http://www.meas-spec.com/downloads/HTU21D.pdf
#
# Credits to: Jay Wineinger <[email protected]>
# Based on: https://github.com/jwineinger/quick2wire-HTU21D/blob/master/htu21d.py
import time
from webiopi.devices.i2c import I2C
from webiopi.devices.sensor import Temperature,Humidity
from webiopi.utils.types import toint
class CRCFailed(Exception): pass
class HTU21D(I2C, Temperature, Humidity):
CMD_READ_TEMP_HOLD = 0xe3
CMD_READ_HUM_HOLD = 0xe5
CMD_READ_TEMP_NOHOLD = 0xf3
CMD_READ_HUM_NOHOLD = 0xf5
CMD_WRITE_USER_REG = 0xe6
CMD_READ_USER_REG = 0xe7
CMD_SOFT_RESET= 0xfe
# uses bits 7 and 0 of the user_register mapping
# to the bit resolutions of (relative humidity, temperature)
RESOLUTIONS = {
(0, 0) : (12, 14),
(0, 1) : (8, 12),
(1, 0) : (10, 13),
(1, 1) : (11, 11),
}
# sets up the times to wait for measurements to be completed. uses the
# max times from the datasheet plus a healthy safety margin (10-20%)
MEASURE_TIMES = {
(12, 14): (.018, .055),
(8, 12): (.005, .015),
(10, 13): (.006, .028),
(11, 11): (.01, .009),
}
def __init__(self):
I2C.__init__(self, 0x40)
self.resolutions = self.get_resolutions()
self.rh_timing, self.temp_timing = self.MEASURE_TIMES[self.resolutions]
def __str__(self):
return "HTU21D(slave=0x%02X)" % self.slave
def __family__(self):
return [Temperature.__family__(self), Humidity.__family__(self)]
def check_crc(self, sensor_val):
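# Verify the sensor's checksum: the 16 data bits are checked against the trailing
# 8-bit CRC using the CRC-8 polynomial 0x131 given in the HTU21D datasheet.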
message_from_sensor = sensor_val >> 8
check_value_from_sensor = sensor_val & 0x0000FF
remainder = message_from_sensor << 8 # Pad with 8 bits because we have to add in the check value
remainder |= check_value_from_sensor # Add on the check value
divisor = 0x988000 # This is the 0x0131 polynomial shifted to farthest left of three bytes
# Operate on only 16 positions of max 24. The remaining 8 are our remainder and should be zero when we're done.
for i in range(16):
if remainder & (1<<(23 - i)): #Check if there is a one in the left position
remainder ^= divisor
divisor >>= 1 # Rotate the divisor max 16 times so that we have 8 bits left of a remainder
if remainder:
raise CRCFailed("CRC checksum failed.")
def reset(self):
self.writeByte(self.CMD_SOFT_RESET);
time.sleep(.02)
def set_resolution(self, resIndex):
self.writeRegister(self.CMD_WRITE_USER_REG, resIndex)
time.sleep(.02)
def get_resolutions(self):
user_reg = self.readRegister(self.CMD_READ_USER_REG)
return self.RESOLUTIONS[user_reg >> 6, user_reg & 0x1]
def get_temp(self):
self.writeByte(self.CMD_READ_TEMP_NOHOLD);
time.sleep(self.temp_timing)
results = self.readBytes(3)
raw_temp = int.from_bytes(results, byteorder="big")
self.check_crc(raw_temp)
results[1] = results[1] & 0xFC # clear status bits
raw_temp = int.from_bytes(results, byteorder="big")
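# Datasheet conversion: T(degC) = -46.85 + 175.72 * S_temp / 2**16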
return -46.85 + (175.72 * ((raw_temp >> 8) / float(2**16)))
def get_rel_humidity(self):
self.writeByte(self.CMD_READ_HUM_NOHOLD);
time.sleep(self.rh_timing)
results = self.readBytes(3)
raw_hum = int.from_bytes(results, byteorder="big")
self.check_crc(raw_hum)
results[1] = results[1] & 0xFC # clear status bits
raw_hum = int.from_bytes(results, byteorder="big")
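# Datasheet conversion: RH(%) = -6 + 125 * S_rh / 2**16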
return -6 + (125 * ((raw_hum >> 8) / float(2**16)))
def get_comp_rel_humidity(self):
RHactualT = self.get_rel_humidity()
Tactual = self.get_temp()
CoeffTemp = -0.15 # from datasheet
return RHactualT + (25 - Tactual)*CoeffTemp
def __getCelsius__(self):
self.reset()
return self.get_temp()
def __getFahrenheit__(self):
return self.Celsius2Fahrenheit()
def __getKelvin__(self):
return self.Celsius2Kelvin()
def __getHumidity__(self):
self.reset()
return self.get_comp_rel_humidity() / 100.00
| apache-2.0 | 123,592,174,051,203,000 | 34.258993 | 119 | 0.618037 | false | 3.318213 | false | false | false |
cynja/coffeenator | webinterface/lib/forms.py | 1 | 2284 | '''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Aaron Meier <[email protected]>
'''
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from webinterface.lib.models import UserProfile, Settings, Coffee
class UserCreationForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ["username", "password", "first_name", "last_name", "email"]
class ProfileCreationForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ["language", "is_admin"]
class SettingsChangeForm(forms.ModelForm):
descale = forms.BooleanField(widget=forms.CheckboxInput, required=False)
welcome_message = forms.CharField(widget=forms.Textarea(attrs={'rows':5, 'cols':30}))
class Meta:
model = Settings
fields = ["force_ssl", "welcome_message", "telnet"]
class LoginForm(forms.ModelForm):
remember = forms.BooleanField(widget=forms.CheckboxInput, required=False)
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ["username", "password"]
class CoffeeOrderForm(forms.ModelForm):
typ = forms.ChoiceField(widget=forms.RadioSelect, required=True, choices=Coffee.typ_choices)
cups = forms.ChoiceField(widget=forms.RadioSelect, required=True, choices=Coffee.cups_choices)
datetime = forms.DateTimeField(required=False)
now = forms.BooleanField(required=False)
class Meta:
model = Coffee
fields = ["typ", "cups", "datetime"] | gpl-3.0 | 6,848,134,421,853,217,000 | 38.396552 | 98 | 0.715849 | false | 4.206262 | false | false | false |
bitcraze/toolbelt | src/toolbelt/test/utils/test_subproc.py | 1 | 3487 | # -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Toolbelt - a utility tu run tools in docker containers
# Copyright (C) 2016 Bitcraze AB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import unittest
from unittest.mock import MagicMock
from toolbelt.utils.subproc import SubProc
from toolbelt.utils.exception import ToolbeltException
class SubprocTest(unittest.TestCase):
def setUp(self):
self.sut = SubProc()
def test_that_call_passes_args_on(self):
# Fixture
subprocess.call = MagicMock(return_value=47)
# Test
actual = self.sut.call(1, "string", name="value")
# Assert
subprocess.call.assert_called_with(1, "string", name="value")
self.assertEqual(47, actual)
def test_that_call_handles_exception(self):
# Fixture
subprocess.call = MagicMock(
side_effect=subprocess.CalledProcessError(
17, 'cmd', b'output'))
# Test
# Assert
with self.assertRaises(ToolbeltException):
self.sut.call()
def test_that_check_call_passes_args_on(self):
# Fixture
subprocess.check_call = MagicMock(return_value=b'Some string')
# Test
self.sut.check_call(1, "string", name="value")
# Assert
subprocess.check_call.assert_called_with(1, "string", name="value")
def test_that_check_call_handles_exception(self):
# Fixture
subprocess.check_call = MagicMock(
side_effect=subprocess.CalledProcessError(
17, 'message', b'output'))
# Test
# Assert
with self.assertRaises(ToolbeltException):
self.sut.check_call()
def test_that_check_output_passes_args_on(self):
# Fixture
subprocess.check_output = MagicMock(return_value=b'Some string')
# Test
self.sut.check_output(1, "string", name="value")
# Assert
subprocess.check_output.assert_called_with(1, "string", name="value")
def test_that_check_output_handles_exception(self):
# Fixture
subprocess.check_output = MagicMock(
side_effect=subprocess.CalledProcessError(
17, 'message', b'output'))
# Test
# Assert
with self.assertRaises(ToolbeltException):
self.sut.check_output()
def test_that_output_is_converted_to_utf8(self):
# Fixture
subprocess.check_output = MagicMock(return_value=b'Some string')
# Test
actual = self.sut.check_output()
# Assert
self.assertEqual('Some string', actual)
| gpl-3.0 | 8,343,372,510,744,517,000 | 30.7 | 77 | 0.591913 | false | 3.77381 | true | false | false |
myt00seven/svrg | svrg_bn/load_dataset.py | 1 | 3585 | import sys, os
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
def load_dataset(if_data_shake):
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
print("Downloading %s" % filename)
urlretrieve(source + filename, 'mnist/' + filename)
import gzip
def load_mnist_images(filename):
if not os.path.exists('mnist/' + filename):
download(filename)
with gzip.open('mnist/' + filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
data = data.reshape(-1, 784)
return data / np.float32(256)
def load_mnist_labels(filename):
if not os.path.exists('mnist/' + filename):
download(filename)
with gzip.open('mnist/' + filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
return data
if if_data_shake==0:
# We can now download and read the training and test set images and labels.
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
else:
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
X_train.flags.writeable = True
y_train.flags.writeable = True
for i in range(50000):
y_train[i] = i%2
if i%2 ==0:
dd = -10
else:
dd = 10
mm = np.ones(784)
X_train[i] = mm*dd
return X_train, y_train, X_val, y_val, X_test, y_test
def load_cifar(f):
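# Load one CIFAR batch file (a cPickle'd dict with 'data' and 'labels'), reshape the
# images to 3x32x32, and carve off 1000-sample validation and test splits.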
import cPickle
fo = open(f, 'rb')
d = cPickle.load(fo)
fo.close()
X_train, y_train = d['data'], np.array(d['labels'], dtype=np.int32)
X_train = X_train.reshape(-1, 3, 32, 32)
X_train, X_val = X_train[:-1000], X_train[-1000:]
y_train, y_val = y_train[:-1000], y_train[-1000:]
X_train, X_test = X_train[:-1000], X_train[-1000:]
y_train, y_test = y_train[:-1000], y_train[-1000:]
return X_train, y_train, X_val, y_val, X_test, y_test
def load_20news():
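# Fetch the 20 Newsgroups corpus (headers/footers/quotes stripped), TF-IDF vectorize it,
# and hold out the last 1000 training examples as a validation split.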
newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
newsgroups_test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(newsgroups_train.data)
X_test = vectorizer.transform(newsgroups_test.data)
y_train = newsgroups_train.target
y_test = newsgroups_test.target
X_train, X_val = X_train[:-1000], X_train[-1000:]
y_train, y_val = y_train[:-1000], y_train[-1000:]
return X_train, y_train, X_val, y_val, X_test, y_test
| mit | -7,397,487,427,911,664,000 | 33.471154 | 98 | 0.60251 | false | 3.244344 | true | false | false |
photoninger/ansible | lib/ansible/modules/network/aci/aci_encap_pool.py | 3 | 5468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_encap_pool
short_description: Manage encap pools on Cisco ACI fabrics (fvns:VlanInstP, fvns:VxlanInstP, fvns:VsanInstP)
description:
- Manage vlan, vxlan, and vsan pools on Cisco ACI fabrics.
- More information from the internal APIC class
I(fvns:VlanInstP), I(fvns:VxlanInstP), and I(fvns:VsanInstP) at
U(https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.5'
options:
allocation_mode:
description:
- The method used for allocating encaps to resources.
- Only vlan and vsan support allocation modes.
aliases: [ mode ]
choices: [ dynamic, static]
description:
description:
- Description for the C(pool).
aliases: [ descr ]
pool:
description:
- The name of the pool.
aliases: [ name, pool_name ]
pool_type:
description:
- The encap type of C(pool).
required: yes
aliases: [ type ]
choices: [ vlan, vxlan, vsan]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
description: Production VLANs
state: present
- name: Remove a vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: absent
- name: Query a vlan pool
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool: production
pool_type: vlan
state: query
- name: Query all vlan pools
aci_encap_pool:
hostname: apic
username: admin
password: SomeSecretPassword
pool_type: vlan
state: query
'''
RETURN = r'''
#
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
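# Map each supported encap pool type to its APIC object class and the RN (relative name)
# prefix used to build the pool's distinguished name.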
ACI_MAPPING = dict(
vlan=dict(
aci_class='fvnsVlanInstP',
aci_mo='infra/vlanns-',
),
vxlan=dict(
aci_class='fvnsVxlanInstP',
aci_mo='infra/vxlanns-',
),
vsan=dict(
aci_class='fvnsVsanInstP',
aci_mo='infra/vsanns-',
),
)
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
allocation_mode=dict(type='str', aliases=['mode'], choices=['dynamic', 'static']),
description=dict(type='str', aliases=['descr']),
pool=dict(type='str', aliases=['name', 'pool_name']),
pool_type=dict(type='str', aliases=['type'], choices=['vlan', 'vxlan', 'vsan'], required=True),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['pool']],
['state', 'present', ['pool']],
],
)
allocation_mode = module.params['allocation_mode']
description = module.params['description']
pool = module.params['pool']
pool_type = module.params['pool_type']
state = module.params['state']
aci_class = ACI_MAPPING[pool_type]["aci_class"]
aci_mo = ACI_MAPPING[pool_type]["aci_mo"]
pool_name = pool
# ACI Pool URL requires the allocation mode for vlan and vsan pools (ex: uni/infra/vlanns-[poolname]-static)
if pool_type != 'vxlan' and pool is not None:
if allocation_mode is not None:
pool_name = '[{0}]-{1}'.format(pool, allocation_mode)
else:
module.fail_json(msg='ACI requires the "allocation_mode" for "pool_type" of "vlan" and "vsan" when the "pool" is provided')
# Vxlan pools do not support allocation modes
if pool_type == 'vxlan' and allocation_mode is not None:
module.fail_json(msg='vxlan pools do not support setting the allocation_mode; please remove this parameter from the task')
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class=aci_class,
aci_rn='{0}{1}'.format(aci_mo, pool_name),
filter_target='eq({0}.name, "{1}")'.format(aci_class, pool),
module_object=pool,
),
)
aci.get_existing()
if state == 'present':
# Filter out module parameters with null values
aci.payload(
aci_class=aci_class,
class_config=dict(
allocMode=allocation_mode,
descr=description,
name=pool,
)
)
# Generate config diff which will be used as POST request body
aci.get_diff(aci_class=aci_class)
# Submit changes if module not in check_mode and the proposed is different than existing
aci.post_config()
elif state == 'absent':
aci.delete_config()
module.exit_json(**aci.result)
if __name__ == "__main__":
main()
| gpl-3.0 | 5,317,981,478,543,609,000 | 27.479167 | 135 | 0.623811 | false | 3.614012 | false | false | false |
tbeckham/eutester | eutester/winrm_connection.py | 5 | 10249 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
__author__ = 'clarkmatthew'
from winrm.protocol import Protocol
from winrm.exceptions import WinRMTransportError
from isodate.isoduration import duration_isoformat
from datetime import timedelta
import StringIO
import traceback
import socket
import copy
import sys
import time
import re
class Winrm_Connection:
def __init__(self,
hostname,
username,
password,
port=5985,
protocol='http',
transport='plaintext',
default_command_timeout=600,
url=None,
debug_method=None,
verbose=True):
self.debug_method = debug_method
self.hostname = hostname
self.username = username
self.password = password
self.port = int(port)
self.protocol = protocol
self.transport = transport
self.default_command_timeout = default_command_timeout #self.convert_iso8601_timeout(default_command_timeout)
self.url = url or str(protocol)+"://"+str(hostname)+":"+str(port)+"/wsman"
self.winproto = self.get_proto()
self.shell_id = None
self.command_id = None
self.last_used = None
self.verbose = verbose
def get_proto(self):
self.debug('Creating winrm connection:' + str(self.hostname) + ":" + str(self.port) + ", Username:" + str(self.username) + ', Password:' + str(self.password))
winproto = Protocol(endpoint=self.url,transport=self.transport,username=self.username,password=self.password)
#winproto.transport.timeout = self.default_command_timeout
return winproto
def convert_iso8601_timeout(self, timeout):
#convert timeout to ISO8601 format
return duration_isoformat(timedelta(int(timeout)))
def debug(self, msg):
if self.debug_method:
self.debug_method(msg)
else:
print(msg)
def reset_shell(self, timeout=None, retries=5):
retry = 0
tb = ""
e = None
self.close_shell()
timeout = timeout or self.default_command_timeout
self.winproto.transport.timeout = timeout #self.default_command_timeout
#self.debug('reset_shell connection, Host:' + str(self.hostname) + ":" + str(self.port) + ", Username:" + str(self.username) + ', Password:' + str(self.password))
while retry < retries:
retry += 1
try:
self.shell_id = self.winproto.open_shell()
return self.shell_id
except WinRMTransportError, wte:
print "Failed to open shell on attempt#:" + str(retry) + "/" + str(retries)+ ", err:" + str(wte)
if retry < retries:
time.sleep(5)
except Exception, e:
tb = self.get_traceback()
errmsg = "Error caught while reseting winrm shell:" +str(e)
self.debug("Error caught while reseting winrm shell:" +str(e))
self.debug(str(tb))
raise Exception('Could not open shell to ' + str(self.url) + str(e))
def cmd(self, command, console_mode_stdin=True, skip_cmd_shell=False, timeout=None, verbose=None):
errmsg = ""
if verbose is None:
verbose = self.verbose
orig_cmd = copy.copy(command)
arguments = command.split(' ')
command = arguments.pop(0)
self.command_id = None
#if timeout is not None:
#convert timeout to ISO8601 format
#timeout = self.convert_iso8601_timeout(timeout)
self.reset_shell(timeout=timeout)
try:
self.command_id= self.winproto.run_command(self.shell_id,
command,
arguments=arguments,
console_mode_stdin=console_mode_stdin,
skip_cmd_shell=skip_cmd_shell)
self.debug('winrm timeout:' + str(timeout) + ', cmd:' + str(orig_cmd))
if timeout is not None:
sockdefault = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
stdout, stderr, statuscode = self.get_timed_command_output(self.shell_id, self.command_id, active_timeout=timeout)
else:
stdout, stderr, statuscode = self.winproto.get_command_output(self.shell_id, self.command_id)
self.debug( 'Command:"' + str(orig_cmd) + '" , Done.')
except WinRMTransportError as wte:
errmsg = str(wte)
except CommandTimeoutException as cte:
self.debug(str(cte))
errmsg = 'timed out'
finally:
try:
#self.winproto.transport.timeout = self.default_command_timeout
if timeout is not None:
socket.setdefaulttimeout(sockdefault)
self.winproto.cleanup_command(self.shell_id, self.command_id)
except: pass
self.close_shell()
if errmsg:
if re.search('timed out', errmsg, re.IGNORECASE):
raise CommandTimeoutException('ERROR: Timed out after:' +
str(self.winproto.transport.timeout) +
', Cmd:"' + str(orig_cmd))
else:
raise Exception(errmsg)
if verbose:
self.debug("\n" + str(stdout) + "\n" + str(stderr))
return {'stdout':stdout, 'stderr':stderr, 'statuscode':statuscode}
def get_timed_command_output(self, shell_id, command_id, active_timeout=0):
"""
Get the Output of the given shell and command
@param string shell_id: The shell id on the remote machine. See #open_shell
@param string command_id: The command id on the remote machine. See #run_command
        @param int active_timeout: Timeout applied while the session is active, i.e. the shell is still
                                   returning data but we want to give up anyway. See the cmd timeout for
                                   the idle timeout where no data has been read.
"""
stdout_buffer, stderr_buffer = [], []
command_done = False
start = time.time()
while not command_done:
elapsed = time.time()-start
if active_timeout and (elapsed > active_timeout):
raise CommandTimeoutException('Active timeout fired after:' + str(elapsed))
stdout, stderr, return_code, command_done = \
self.winproto._raw_get_command_output(shell_id, command_id)
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
return ''.join(stdout_buffer), ''.join(stderr_buffer), return_code
def close_shell(self):
if self.shell_id:
self.winproto.close_shell(self.shell_id)
self.shell_id = None
def sys(self, command, include_stderr=False, listformat=True, carriage_return=False, timeout=None, code=None, verbose=None):
ret = []
if verbose is None:
verbose = self.verbose
output = self.cmd(command, timeout=timeout, verbose=verbose )
if code is not None and output['statuscode'] != code:
raise CommandExitCodeException('Cmd:' + str(command) + ' failed with status code:'
+ str(output['statuscode'])
+ "\n, stdout:" + str(output['stdout'])
+ "\n, stderr:" + str(output['stderr']))
ret = output['stdout']
if ret:
if not carriage_return:
#remove the '\r' chars from the return buffer, leave '\n'
ret = ret.replace('\r','')
if listformat:
ret = ret.splitlines()
if include_stderr:
                ret.extend(output['stderr'].splitlines())  # extend() mutates ret in place
return ret
@classmethod
def get_traceback(cls):
'''
Returns a string buffer with traceback, to be used for debug/info purposes.
'''
try:
out = StringIO.StringIO()
traceback.print_exception(*sys.exc_info(),file=out)
out.seek(0)
buf = out.read()
except Exception, e:
buf = "Could not get traceback"+str(e)
return str(buf)
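# Illustrative usage sketch -- the host, credentials and command below are made
# up and not part of this module:
#
#   conn = Winrm_Connection(hostname='10.0.0.5', username='Administrator',
#                           password='secret')
#   lines = conn.sys('ipconfig /all', code=0, timeout=120)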
class CommandExitCodeException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class CommandTimeoutException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value) | bsd-2-clause | 8,636,401,823,043,342,000 | 40 | 170 | 0.591082 | false | 4.359422 | false | false | false |
chrisjsewell/ipymd | ipymd/plotting/animation_examples/multi_axes.py | 1 | 2903 | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 17 22:02:16 2016
@author: cjs14
"""
#http://www.astrojack.com/tag/ipython-notebook/
#%matplotlib inline
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
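# display_animation from JSAnimation embeds the finished FuncAnimation as an
# HTML/JavaScript player inside the notebook, so no external video encoder is
# needed to preview it.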
Nframes = 100
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure(figsize=(6,6))
rad = 0.5
# ax = fig.add_subplot(111, xlim=(-2.*rad, 2.*rad), ylim=(-2.*rad, 2.*rad), aspect='equal')
ax = plt.subplot2grid((3,3), (0,0), colspan=2, rowspan=2,
xlim=(-2.*rad, 2.*rad), ylim=(-2.*rad, 2.*rad), aspect='equal')
circ = plt.Circle((0, 0), radius=rad, facecolor="None", edgecolor='k', lw=4)
ax.add_patch(circ)
ax.grid(False)
ax.axis('off')
circle, = ax.plot([], [], marker='o', ms=10)
#ax1 = fig.add_subplot(212, ylim=(0, 2.*np.pi), xlim=(-2.*rad, 2.*rad))
ax1 = plt.subplot2grid((3,3), (2,0), colspan=2, ylim=(0, 2.*np.pi), xlim=(-2.*rad, 2.*rad), sharex=ax)
ax1.tick_params(axis='both', which='major', labelsize=10)
ax1.set_ylabel('time', fontsize=12)
ax1.set_xlabel('x position', fontsize=12)
x_pos_marker, = ax1.plot([], [], marker='o', ms=10, color='b')
x_pos_line, = ax1.plot([], [], color='k')
#ax2 = fig.add_subplot(122, xlim=(0, 2.*np.pi), ylim=(-2.*rad, 2.*rad))
ax2 = plt.subplot2grid((3,3), (0,2), rowspan=2, xlim=(0, 2.*np.pi), ylim=(-2.*rad, 2.*rad), sharey=ax)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.tick_params(axis='both', which='major', labelsize=10)
ax2.set_xlabel('time', fontsize=12)
ax2.set_ylabel('y position', fontsize=12)
y_pos_marker, = ax2.plot([], [], marker='o', ms=10, color='b')
y_pos_line, = ax2.plot([], [], color='k')
# initialization function: plot the background of each frame
def init():
circle.set_data([], [])
x_pos_marker.set_data([], [])
y_pos_marker.set_data([], [])
x_pos_line.set_data([], [])
y_pos_line.set_data([], [])
return circle, x_pos_marker, y_pos_marker, x_pos_line, y_pos_line
# This function moves the polygons as a function of the frame i
def animate(i):
t = 2.*np.pi*float(i/(Nframes - 1.))
x_marker = rad*np.cos(t)
y_marker = rad*np.sin(t)
circle.set_data(x_marker, y_marker)
x_pos_marker.set_data(x_marker, t)
y_pos_marker.set_data(t, y_marker)
all_t = np.linspace(0, 2.*np.pi, Nframes)
x = rad*np.cos(all_t)
y = rad*np.sin(all_t)
x_pos_line.set_data(x, all_t)
y_pos_line.set_data(all_t, y)
return circle, x_pos_marker, y_pos_marker, x_pos_line, y_pos_line
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=Nframes, interval=20, blit=True)
# call our new function to display the animation
display_animation(anim) | gpl-3.0 | 3,167,111,567,306,893,000 | 34.414634 | 102 | 0.635894 | false | 2.673112 | false | false | false |
tumi8/INSALATA | src/insalata/scanner/modules/XmlScanner.py | 1 | 22767 | from lxml import etree
from insalata.model.Location import Location
from insalata.model.Layer2Network import Layer2Network
from insalata.model.Layer3Network import Layer3Network
from insalata.model.Interface import Interface
def scan(graph, connectionInfo, logger, thread):
"""
Load the network topology given in an XML file into the graph.
Timer is -1 => Objects will not be deleted.
    Therefore, the infrastructure in the XML file is loaded permanently until the XML is changed.
The module is able to detect changes in the XML file. Therefore, it is possible to modify the
loaded information at runtime.
Necessary values in the configuration file of this collector module:
- file Path to the XML file the collector module shall parse
:param graph: Data interface object for this collector module
:type graph: insalata.model.Graph.Graph
:param connectionInfo: Information needed to connect to xen server
:type connectionInfo: dict
:param logger: The logger this scanner shall use
:type logger: logging:Logger
:param thread: Thread executing this collector
:type thread: insalata.scanner.Worker.Worker
"""
logger.info("Reading xml file '{0}' into internal graph.".format(connectionInfo['file']))
timeout = -1
name = connectionInfo['name']
configXml = etree.parse(connectionInfo['file'])
readElements = set()
readLocations(graph, configXml.find("locations"), logger, name, timeout, readElements)
readL2Networks(graph, configXml.find("layer2networks"), logger, name, timeout, readElements)
readL3Networks(graph, configXml.find("layer3networks"), logger, name, timeout, readElements)
readHosts(graph, configXml.xpath(".//host[not(@control)]"), logger, name, timeout, readElements)
for element in graph.getAllNeighbors():
inList = False
for el in readElements:
if el == element:
inList = True
if not inList: #Delete verification for these as they do not appear in the Xml
element.removeVerification(name)
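# A minimal connectionInfo dict for this collector, for illustration only (the
# values are hypothetical; the keys are the ones read in scan() above):
#
#   connectionInfo = {"name": "xml-collector", "file": "/etc/insalata/topology.xml"}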
def readHosts(graph, hostsXml, logger, name, timeout, readElements):
"""
Load hosts of the xml file into the graph.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param hostsXml: Part of the parsed XML containing the hosts.
:type hostsXml: list
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
logger.debug("Reading all hosts given in XML")
if not hostsXml:
return
for hostXml in hostsXml:
if "location" in hostXml.attrib:
location = graph.getOrCreateLocation(hostXml.attrib["location"], name, timeout)
else:
location = graph.getOrCreateLocation(hostXml.attrib["physical"])
readElements.add(location)
template = [t for t in location.getTemplates() if t.getID() == hostXml.attrib["template"]]
if len(template) > 0:
            template = template[0]  # an empty template list should not be possible if the XML passed the preprocessor
host = graph.getOrCreateHost(hostXml.attrib["id"], name, timeout, location, template)
readElements.add(host)
logger.debug("Found host: {0}.".format(hostXml.attrib["id"]))
if "cpus" in hostXml.attrib:
host.setCPUs(int(hostXml.attrib["cpus"]))
if ("memoryMin" in hostXml.attrib) and ("memoryMax" in hostXml.attrib):
host.setMemory(int(hostXml.attrib["memoryMin"]), int(hostXml.attrib["memoryMax"]))
if "powerState" in hostXml.attrib:
host.setPowerState(hostXml.attrib["powerState"])
#interfaces, routing, firewall rules and disks added with edges
if hostXml.find("interfaces") is not None:
readInterfaces(graph, hostXml.find("interfaces"), host, logger, name, timeout, readElements)
if hostXml.find("routes") is not None:
readRoutes(graph, hostXml.find("routes"), host, logger, name, timeout, readElements)
if hostXml.find("disks") is not None:
readDisks(graph, hostXml.find("disks"), host, logger, name, timeout, readElements)
if hostXml.find(".//firewallRules") is not None:
readFirewallRules(graph, hostXml.find(".//firewallRules"), host, logger, name, timeout, readElements)
#find firewall raw data
if hostXml.find('.//raw') is not None:
rawXml = hostXml.find('.//raw')
if rawXml is not None:
raw = graph.getOrCreateFirewallRaw(name, timeout, host, rawXml.attrib["firewall"], rawXml.text)
host.setFirewallRaw(raw)
readElements.add(raw)
def readInterfaces(graph, interfacesXml, host, logger, name, timeout, readElements):
"""
Load all interfaces of a host. The interfaces will be added to the host.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param interfacesXml: Part of the parsed XML containing the interfaces of the current host.
:type interfacesXml: list
:param host: The host that contains the read interfaces
:type host: insalata.model.Host.Host
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if interfacesXml:
logger.debug("Reading interfaces from XML.")
if not interfacesXml:
return
for ifaceXml in interfacesXml.findall("interface"):
if not "network" in ifaceXml.attrib:
logger.warning("No network attribute found for interface '{0}'.".format(ifaceXml.attrib["mac"]))
continue
network = [n for n in graph.getAllNeighbors(Layer2Network) if n.getID() == ifaceXml.attrib["network"]]
if len(network) == 0:
logger.warning("No suitable network found for interface '{0}'.".format(ifaceXml.attrib["mac"]))
continue
else:
network = network[0]
interface = graph.getOrCreateInterface(ifaceXml.attrib["mac"], name, timeout, network=network)
readElements.add(interface)
logger.debug("Found Interface with mac: {0}.".format(interface.getID()))
if "rate" in ifaceXml.attrib:
interface.setRate(ifaceXml.attrib["rate"])
if "mtu" in ifaceXml.attrib:
interface.setMtu(ifaceXml.attrib["mtu"])
host.addInterface(interface, name, timeout)
readLayer3Addresses(graph, ifaceXml.findall("layer3address"), interface, logger, name, timeout, readElements)
def readLayer3Addresses(graph, layer3AddressesXml, interface, logger, name, timeout, readElements):
"""
Load all Layer3Addresses of a interface. The addresses will be added to the interface automatically.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param layer3AddressesXml: Part of the parsed XML containing the Layer3Addresses of the interface.
:type layer3AddressesXml: list
:param interface: The interface containing the addresses
:type host: insalata.model.Interface.Interface
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if layer3AddressesXml:
logger.debug("Read all Layer3Addresses of interface {0} in XML.".format(interface.getID()))
if not layer3AddressesXml:
return
for addressXml in layer3AddressesXml:
network = None
if "network" in addressXml.attrib:
network = [n for n in graph.getAllNeighbors(Layer3Network) if n.getID() == addressXml.attrib["network"]]
if len(network) == 0:
logger.warning("No suitable network found for {0}.".format(addressXml.attrib["network"]))
network = None
netmask = None
else:
network = network[0]
netmask = network.getNetmask() if not "netmask" in addressXml.attrib else addressXml.attrib["netmask"]
gateway = None if not "gateway" in addressXml.attrib else addressXml.attrib["gateway"]
address = graph.getOrCreateLayer3Address(addressXml.attrib["address"], name, timeout, netmask, gateway)
readElements.add(address)
if "static" in addressXml.attrib:
address.setStatic(addressXml.attrib["static"] == "True")
else:
address.setStatic(True)
if network:
address.setNetwork(network)
interface.addAddress(address, name, timeout)
#get services
if addressXml.find("services") is not None:
readServices(graph, addressXml.find("services"), address, logger, name, timeout, readElements)
def readServices(graph, servicesXml, address, logger, name, timeout, readElements):
"""
    Load all services of a Layer3Address. The services will be added automatically.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param servicesXml: Part of the parsed XML containing the services of this address.
    :type servicesXml: list
:param address: The Layer3Address the services are provided on
:type address: insalata.model.Layer3Address.Layer3Address
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if not servicesXml:
return
if servicesXml:
logger.debug("Reading Services from XML for address: {0}.".format(address.getID()))
for serviceXml in servicesXml:
#special dhcp service
if serviceXml.tag == "dhcp":
service = graph.getOrCreateDhcpService(name, timeout, address)
if "lease" in serviceXml.attrib:
service.setLease(serviceXml.attrib["lease"])
if ("from" or "to") in serviceXml.attrib:
service.setStartEnd(serviceXml.attrib["from"], serviceXml.attrib["to"])
if ("announcedGateway") in serviceXml.attrib:
service.setAnnouncedGateway(serviceXml.attrib["announcedGateway"])
#special dns service
elif serviceXml.tag == "dns":
service = graph.getOrCreateDnsService(name, timeout, address)
if "domain" in serviceXml.attrib:
service.setDomain(serviceXml.attrib["domain"])
#add more special services here, e.g. http
#generic unknown services
else:
service = graph.getOrCreateService(serviceXml.attrib["port"], serviceXml.attrib["protocol"], name, timeout, serviceXml.attrib["type"], address)
if "type" in serviceXml.attrib:
service.setName(serviceXml.attrib["type"])
if "product" in serviceXml.attrib:
service.setProduct(serviceXml.attrib["product"])
if "version" in serviceXml.attrib:
service.setVersion(serviceXml.attrib["version"])
readElements.add(service)
address.addService(service, name, timeout)
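# For reference, the service elements handled above look roughly like the
# following (attribute names are taken from the code; the surrounding XML
# layout is an assumption):
#
#   <dhcp lease="600" from="10.0.0.10" to="10.0.0.50" announcedGateway="10.0.0.1"/>
#   <dns domain="example.org"/>
#   <service port="80" protocol="tcp" type="http" product="nginx" version="1.10"/>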
def readRoutes(graph, routingXml, host, logger, name, timeout, readElements):
"""
Load all routes of a host. The routes will be added to the host automatically.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param routingXml: Part of the parsed XML containing the routes.
:type routingXml: list
:param host: The host that contains the read routes.
:type host: insalata.model.Host.Host
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if not routingXml:
return
if routingXml:
logger.debug("Reading all Routes from XML for host {0}.".format(host.getID()))
for routeXml in routingXml:
interface = None
if "interface" in routeXml.attrib:
interface = [i for i in graph.getAllNeighbors(Interface) if i.getID() == routeXml.attrib["interface"]]
if len(interface) == 0:
logger.debug("No interface found found for route. Interface: {0}.".format(routeXml.attrib["interface"]))
else:
interface = interface[0]
route = graph.getOrCreateRoute(name, timeout, host, routeXml.attrib["destination"], routeXml.attrib["genmask"], routeXml.attrib["gateway"], interface)
host.addRoute(route, name, timeout)
readElements.add(route)
def readFirewallRules(graph, rulesXml, host, logger, name, timeout, readElements):
"""
Load all firewall rules of a host. The rules will be added to the host automatically.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param rulesXml: Part of the parsed XML containing the firewall rules.
:type rulesXml: list
:param host: The host that contains the read firewall rules.
:type host: insalata.model.Host.Host
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
logger.debug("Reading all firewall rules from XML for host {0}.".format(host.getID()))
if not rulesXml:
return
for ruleXml in rulesXml:
interface = [i for i in graph.getAllNeighbors(Interface) if "inInterface" in ruleXml.attrib and i.getID() == ruleXml.attrib["inInterface"]]
inInterface = interface[0] if len(interface) > 0 else None
if rulesXml:
logger.debug("Reading all firewall rules from XML for host {0}.".format(host.getID()))
for ruleXml in rulesXml:
interface = [i for i in graph.getAllNeighbors(Interface) if "inInterface" in ruleXml.attrib and i.getID() == ruleXml.attrib["inInterface"]]
inInterface = interface[0] if len(interface) > 0 else None
interface = [i for i in graph.getAllNeighbors(Interface) if "outInterface" in ruleXml.attrib and i.getID() == ruleXml.attrib["outInterface"]]
outInterface = interface[0] if len(interface) > 0 else None
srcnet = destnet = srcports = destports = protocol = None
if "chain" in ruleXml.attrib:
chain = ruleXml.attrib["chain"]
if "action" in ruleXml.attrib:
action = ruleXml.attrib["action"]
if "srcnet" in ruleXml.attrib:
srcnet = ruleXml.attrib["srcnet"]
if "destnet" in ruleXml.attrib:
destnet = ruleXml.attrib["destnet"]
if "srcports" in ruleXml.attrib:
srcports = ruleXml.attrib["srcports"]
if "destports" in ruleXml.attrib:
destports = ruleXml.attrib["destports"]
if "protocol" in ruleXml.attrib:
protocol = ruleXml.attrib["protocol"]
rule = graph.getOrCreateFirewallRule(name, timeout, host, chain, action, protocol, srcnet, destnet, srcports, destports, inInterface, outInterface)
host.addFirewallRule(rule, name, timeout)
readElements.add(rule)
def readDisks(graph, disksXml, host, logger, name, timeout, readElements):
"""
Load all disks of a host. The disks will be added to the host.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param disksXml: Part of the parsed XML containing the disks of the current host.
:type disksXml: list
:param host: The host that contains the read interfaces.
:type host: insalata.model.Host.Host
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
logger.debug("Read all disks on host {0}.".format(host.getID()))
if not disksXml:
return
for diskXml in disksXml:
logger.debug("Found disk {0} for host {1}.".format(diskXml.attrib["id"], host.getID()))
disk = graph.getOrCreateDisk(diskXml.attrib["id"], name, timeout, host)
if "size" in diskXml.attrib:
disk.setSize(int(diskXml.attrib["size"]))
logger.debug("Adding disk '{0}' to host '{1}'".format(disk.getID(), host.getID()))
host.addDisk(disk, name, timeout)
readElements.add(disk)
def readL2Networks(graph, l2networksXml, logger, name, timeout, readElements):
"""
Load all Layer2Networks given in the XML.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param l2networksXml: Part of the parsed XML containing the networks.
:type l2networksXml: list
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if l2networksXml:
logger.debug("Reading Layer2Networks from XML.")
if not l2networksXml:
return
for netXml in l2networksXml.findall("layer2network"):
if "location" in netXml.attrib:
location = graph.getOrCreateLocation(netXml.attrib["location"], name, timeout)
else:
location = graph.getOrCreateLocation("physical", name, timeout)
readElements.add(location)
readElements.add(graph.getOrCreateLayer2Network(netXml.attrib["id"], name, timeout, location))
logger.debug("Found Layer2Network {0} in location {1}.".format(netXml.attrib["id"], location.getID()))
def readL3Networks(graph, l3networksXml, logger, name, timeout, readElements):
"""
Load all Layer3Networks given in the XML.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param l3networksXml: Part of the parsed XML containing the networks.
:type l3networksXml: list
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if l3networksXml is None:
return
if l3networksXml:
logger.debug("Reading Layer3Networks from XML.")
for netXml in l3networksXml.findall("layer3network"):
readElements.add(graph.getOrCreateLayer3Network(netXml.attrib["id"], name, timeout, netXml.attrib["address"], netXml.attrib["netmask"]))
logger.debug("Found Layer3Network: {0}.".format(netXml.attrib["id"]))
def readLocations(graph, locationsXml, logger, name, timeout, readElements):
"""
Load all Locations given in the XML.
:param graph: Data Interface object for this scanner
:type graph: :class: `Graph`
:param locationsXml: Part of the parsed XML containing the locations.
:type locationsXml: list
:param logger: The logger this scanner shall use
:type logger: seealso:: :class:`logging:Logger`
:param name: Name this collector module uses for verification
:type name: str
:param timeout: Timeout of this collector module
:type timeout: int
    :param readElements: Set containing every read element from the XML -> allows deleting elements that no longer exist
:type readElements: set
"""
if not locationsXml:
return
if locationsXml:
logger.debug("Reading Locations from XML.")
for locationXml in locationsXml.findall("location"):
location = graph.getOrCreateLocation(locationXml.attrib["id"], name, timeout)
logger.debug("Found location: {0}.".format(location.getID()))
readElements.add(location)
| apache-2.0 | -1,567,690,029,503,325,200 | 40.095668 | 162 | 0.663987 | false | 4.286763 | true | false | false |
jigarkb/CTCI | LeetCode/161-M-OneEditDistance.py | 2 | 1107 | # Given two strings s and t, determine if they are both one edit distance apart.
#
# Note:
#
# There are 3 possiblities to satisify one edit distance apart:
#
# Insert a character into s to get t
# Delete a character from s to get t
# Replace a character of s to get t
# Example 1:
#
# Input: s = "ab", t = "acb"
# Output: true
# Explanation: We can insert 'c' into s to get t.
# Example 2:
#
# Input: s = "cab", t = "ad"
# Output: false
# Explanation: We cannot get t from s by only one step.
# Example 3:
#
# Input: s = "1203", t = "1213"
# Output: true
# Explanation: We can replace '0' with '1' to get t.
class Solution(object):
def isOneEditDistance(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
len_s = len(s)
len_t = len(t)
if abs(len_s - len_t) > 1 or s == t:
return False
if len_s > len_t:
return self.isOneEditDistance(t, s)
for i in range(len_s):
if s[i] != t[i]:
return s[i+1:] == t[i+1:] or s[i:] == t[i+1:]
return True
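# Quick self-check of the solution above; the cases mirror the examples in the
# problem statement (this block is illustrative and not part of the original
# submission).
if __name__ == "__main__":
    sol = Solution()
    assert sol.isOneEditDistance("ab", "acb")       # one insertion
    assert not sol.isOneEditDistance("cab", "ad")   # more than one edit
    assert sol.isOneEditDistance("1203", "1213")    # one replacement
    assert not sol.isOneEditDistance("abc", "abc")  # zero edits is not one edit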
# Note:
# | mit | -4,647,372,064,380,227,000 | 21.16 | 80 | 0.550136 | false | 3 | false | false | false |
DjangoAdminHackers/django-multi-email-field | multi_email_field/tests.py | 1 | 1626 | from django.test import SimpleTestCase
from django.core.exceptions import ValidationError
from multi_email_field.forms import MultiEmailField as MultiEmailFormField
from multi_email_field.widgets import MultiEmailWidget
class MultiEmailFormFieldTest(SimpleTestCase):
def test_widget(self):
f = MultiEmailFormField()
self.assertIsInstance(f.widget, MultiEmailWidget)
def test_to_python(self):
f = MultiEmailFormField()
# Empty values
for val in ['', None]:
self.assertEquals([], f.to_python(val))
# One line correct value
val = ' [email protected] '
self.assertEquals(['[email protected]'], f.to_python(val))
# Multi lines correct values (test of #0010614)
val = '[email protected]\[email protected]\r\[email protected]'
self.assertEquals(['[email protected]', '[email protected]', '[email protected]'],
f.to_python(val))
def test_validate(self):
f = MultiEmailFormField(required=True)
# Empty value
val = []
self.assertRaises(ValidationError, f.validate, val)
# Incorrect value
val = ['not-an-email.com']
self.assertRaises(ValidationError, f.validate, val)
# An incorrect value with correct values
val = ['[email protected]', 'not-an-email.com', '[email protected]']
self.assertRaises(ValidationError, f.validate, val)
# Should not happen (to_python do the strip)
val = [' [email protected] ']
self.assertRaises(ValidationError, f.validate, val)
# Correct value
val = ['[email protected]']
f.validate(val)
| lgpl-3.0 | -7,006,287,341,023,695,000 | 36.813953 | 76 | 0.627921 | false | 3.807963 | true | false | false |
GbalsaC/bitnamiP | pika/setup.py | 1 | 1972 | # ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
from setuptools import setup
import os
import platform
# Conditionally include additional modules for docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
requirements = list()
if on_rtd:
requirements.append('tornado')
# Conditional include unittest2 for versions of python < 2.7
tests_require=['nose', 'mock']
platform_version = list(platform.python_version_tuple())[0:2]
if platform_version[0] != '3' and platform_version != ['2', '7']:
tests_require.append('unittest2')
long_description = ('Pika is a pure-Python implementation of the AMQP 0-9-1 '
'protocol that tries to stay fairly independent of the '
'underlying network support library. Pika was developed '
'primarily for use with RabbitMQ, but should also work '
'with other AMQP 0-9-1 brokers.')
setup(name='pika',
version='0.9.10p0',
description='Pika Python AMQP Client Library',
long_description=long_description,
author='Tony Garnock-Jones',
author_email='[email protected]',
maintainer='Gavin M. Roy',
maintainer_email='[email protected]',
url='https://github.com/pika ',
packages=['pika', 'pika.adapters'],
license='MPL v1.1 and GPL v2.0 or newer',
install_requires=requirements,
tests_require=tests_require,
test_suite = "nose.collector",
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)',
'Operating System :: OS Independent',
'Topic :: Communications',
'Topic :: Internet',
'Topic :: Software Development :: Libraries',
],
zip_safe=True
)
| agpl-3.0 | 2,402,531,165,235,268,600 | 36.207547 | 78 | 0.631339 | false | 3.836576 | true | false | false |
dudanogueira/llm | flight_options/models.py | 1 | 1327 | from django.db import models
class FlightOption(models.Model):
def __unicode__(self):
return "Flight Option #%s for %s" % (self.id, self.speaker)
speaker = models.ForeignKey('speakers.Speaker')
route_description = models.TextField(blank=True)
# arrival
arrival_date = models.DateTimeField(blank=True, null=True)
arrival_observations = models.TextField(blank=True)
# departure
departure_date = models.DateTimeField(blank=True, null=True)
departure_observations = models.TextField(blank=True)
# sent
sent = models.BooleanField(default=False)
sent_timestamp = models.DateTimeField(blank=True, null=True)
# seen
seen = models.BooleanField(default=False)
seen_timestamp = models.DateTimeField(blank=True, null=True)
# approved
approved = models.BooleanField(default=False)
approved_timestamp = models.DateTimeField(blank=True, null=True)
# approved
rejected = models.BooleanField(default=False)
rejected_timestamp = models.DateTimeField(blank=True, null=True)
rejected_reason = models.TextField(blank=True)
# meta
criado = models.DateTimeField(blank=True, auto_now_add=True, verbose_name="Criado")
atualizado = models.DateTimeField(blank=True, auto_now=True, verbose_name="Atualizado")
| gpl-3.0 | 4,251,486,541,388,065,000 | 39.53125 | 91 | 0.70309 | false | 3.846377 | false | false | false |
sravel/scripts | cluster/grepMotifFromAlignment.py | 1 | 8472 | #!/usr/local/bioinfo/python/3.4.3_build2/bin/python
# -*- coding: utf-8 -*-
# @package grepMotifFromAlignment.py
# @author Sebastien Ravel
"""
The grepMotifFromAlignment script
=================================
:author: Sebastien Ravel
:contact: [email protected]
:date: 08/07/2016
:version: 0.1
Script description
------------------
This Programme parse Aligment info to build motif table of SNP in gene's
Example
-------
>>> grepMotifFromAlignment.py -d path/to/fasta -o filenameout
Help Programm
-------------
optional arguments:
- \-h, --help
show this help message and exit
- \-v, --version
display grepMotifFromAlignment.py version number and exit
Input mandatory infos for running:
- \-d <path/to/directory>, --directory <path/to/directory>
path to directory fasta files
- \-o <filename>, --out <filename>
Name of output file
Input infos for running with default values:
- \-l <filename>, --list <filename>
File with Strain to keep (one per row), default keep all strains
"""
##################################################
## Modules
##################################################
#Import MODULES_SEB
import sys, os
current_dir = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.insert(1,current_dir+'../modules/')
from MODULES_SEB import directory, dictList2txt, dictDict2txt, dict2txt, relativeToAbsolutePath, existant_file, sort_human, loadInList  # loadInList is used below and is assumed to come from MODULES_SEB like the other helpers
## Python modules
import argparse
from time import localtime, strftime
## BIO Python modules
from Bio import AlignIO
from Bio.Align import AlignInfo, MultipleSeqAlignment
##################################################
## Variables Globales
version="0.1"
VERSION_DATE='04/03/2015'
debug="False"
#debug="True"
##################################################
## Functions
##################################################
## Main code
##################################################
if __name__ == "__main__":
# Initializations
start_time = strftime("%d-%m-%Y_%H:%M:%S", localtime())
# Parameters recovery
    parser = argparse.ArgumentParser(prog='grepMotifFromAlignment.py', description='''This program parses alignment info to build a motif table of SNPs in genes''')
parser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help=\
'display grepMotifFromAlignment.py version number and exit')
#parser.add_argument('-dd', '--debug',choices=("False","True"), dest='debug', help='enter verbose/debug mode', default = "False")
filesreq = parser.add_argument_group('Input mandatory infos for running')
filesreq.add_argument('-d', '--directory', metavar="<path/to/directory>",type=directory, required=True, dest = 'pathDirectory', help = 'path to directory fasta files')
filesreq.add_argument('-o', '--out', metavar="<filename>", required=True, dest = 'paramoutfile', help = 'Name of output file')
files = parser.add_argument_group('Input infos for running with default values')
files.add_argument('-l', '--list', metavar="<filename>", default="ALL", dest = 'listKeepFile', help = 'File with Strain to keep (one per row), default keep all strains')
# Check parameters
args = parser.parse_args()
#Welcome message
print("#################################################################")
print("# Welcome in grepMotifFromAlignment (Version " + version + ") #")
print("#################################################################")
print('Start time: ', start_time,'\n')
    # retrieve the config file passed as an argument
pathDirectory = args.pathDirectory
outputfilename = relativeToAbsolutePath(args.paramoutfile)
print("\t - Input pathDirectory is: %s" % pathDirectory)
print("\t - Output file name is: %s" % outputfilename)
if args.listKeepFile not in ["ALL"]:
listKeepSouche = loadInList(existant_file(args.listKeepFile))
print("\t - You want to keep strains:\n%s" % "\n".join(listKeepSouche))
        basename = args.listKeepFile.split(".")[0]
else:
listKeepSouche = []
print("\t - You want to keep all strains \n")
basename = "All"
dicoOutputTxt = {}
dicoSeqSNP = {}
dicoFilenbSNP ={}
dicoFileCountSNP ={}
fileEmpty = 0
listFileEmpty = []
ctr = 1
nbMotifTotal=0
for filein in pathDirectory.listFiles:
ctr += 1
if ((ctr % 100 == 0) and (ctr != 0)) or (float(ctr) == len(pathDirectory.listFiles)):
percent = (float(ctr)/float(len(pathDirectory.listFiles)))*100
sys.stdout.write("\rProcessed up to %0.2f %%..." % percent)
sys.stdout.flush()
#print(filein)
dicoSeqSNP = {}
nbSNP = 0
tableauSoucheName = []
        # read the alignment
#alignment = AlignIO.read(open(resultataligment,"r"), "fasta")
alignmentStart = AlignIO.read(open(filein,"r"), "fasta")
        # create a new alignment containing only the desired strains:
keepListRecord = []
for record in alignmentStart:
if record.id not in listKeepSouche and args.listKeepFile == "ALL" :
listKeepSouche.append(record.id)
#print(record.id)
if record.id in listKeepSouche:
keepListRecord.append(record)
tableauSoucheName.append(record.id)
if record.id not in dicoSeqSNP.keys():
dicoSeqSNP[record.id] = ""
alignment = MultipleSeqAlignment(keepListRecord)
lenAlignement = int(alignment.get_alignment_length())
#print(alignment)
#print(tableauSoucheName)
#print(len(tableauSoucheName))
for indice in range(0,lenAlignement):
tab = list(alignment[:,indice])
#print(tab)
nbO = tab.count(tab[0])
nbA = tab.count("A")
nbC = tab.count("C")
nbT = tab.count("T")
nbG = tab.count("G")
nbN = tab.count("N")+tab.count("n")
nbGap = tab.count("-")
sommeACTG = nbA + nbC + nbT + nbG
allcount = sommeACTG + nbN + nbGap
            if int(allcount) != len(alignment): # check that the total equals the number of strains
print( sommeACTG, nbA , nbC , nbT, nbG,nbN, nbGap)
print( tab)
exit()
if nbGap == 0 :
if nbO != sommeACTG and nbN == 0:
nbSNP+=1
#print(indice)
for lentabi in range(0,len(tab)):
dicoSeqSNP[tableauSoucheName[lentabi]] += (tab[lentabi])
nbSNPtotal=nbSNP
if nbSNPtotal == 0:
fileEmpty += 1
listFileEmpty.append(filein)
else:
nbMotifTotal+=1
listMotif = []
for geneId, sequence in dicoSeqSNP.items():
nbSNPtotal = (len(sequence))
listMotif.append(sequence)
nameMGG = filein.split("/")[-1].replace("_Orthologue_macse_NT.fasta","")
if nameMGG not in dicoFileCountSNP.keys():
dicoFileCountSNP[nameMGG] = {"NBSNP":nbSNPtotal,
"lenAlign":lenAlignement}
if nbSNPtotal not in dicoFilenbSNP.keys():
dicoFilenbSNP[nbSNPtotal] = 1
else:
dicoFilenbSNP[nbSNPtotal] +=1
#print(nbSNPtotal)
dicoCompteMotif = {k: listMotif.count(k) for k in set(listMotif)}
#print(dict2txt(dicoCompteMotif))
dicoTranslateMotif2Code = {}
code = 10
for motifUniq in dicoCompteMotif.keys():
dicoTranslateMotif2Code[motifUniq] = code
code+=1
for geneId, sequence in dicoSeqSNP.items():
codeSeq = dicoTranslateMotif2Code[sequence]
if geneId not in dicoOutputTxt.keys():
dicoOutputTxt[geneId] = [str(codeSeq)]
else:
dicoOutputTxt[geneId].append(str(codeSeq))
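    # At this point dicoOutputTxt maps each strain to one integer code per
    # alignment file: strains sharing the same SNP motif in a file receive the
    # same code (codes start at 10), so equal values in a column mean those
    # strains carry an identical motif for that gene.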
output_handle = open(outputfilename, "w")
#print(dictList2txt(dicoOutputTxt))
outputTxt = ""
#for key in sorted(dicoOutputTxt.keys()):
for key in sorted(listKeepSouche, key=sort_human):
value = "\t".join(dicoOutputTxt[key])
outputTxt += "%s\t%s\n" % (str(key),str(value))
output_handle.write(outputTxt)
outputListEmpty = open(basename+"_outputListEmpty.txt", "w")
for fileEmptyName in listFileEmpty:
outputListEmpty.write(fileEmptyName+"\n")
with open(basename+"_LenAlign_nbSNP.txt","w") as output1:
txt1 = dictDict2txt(dicoFileCountSNP)
output1.write(txt1)
with open(basename+"_nbSNPallFile.txt","w") as output2:
txt1 = dict2txt(dicoFilenbSNP)
output2.write(txt1)
print("\n\nExecution summary:")
print(" - Outputting \n\
In the end there are %i motifs across all the MGG genes\n\
There are %i empty files\n\
the sequences were added to the file %s\n\
the list of empty files is in the file outputListEmpty.txt" %(nbMotifTotal,fileEmpty,outputfilename))
print("\nStop time: ", strftime("%d-%m-%Y_%H:%M:%S", localtime()))
print("#################################################################")
print("# End of execution #")
print("#################################################################")
| gpl-3.0 | 4,766,587,580,376,903,000 | 31.702703 | 170 | 0.63412 | false | 3.108257 | false | false | false |
MSLNZ/msl-equipment | tests/test_connection_demo.py | 1 | 8679 | from msl.equipment.connection import Connection
from msl.equipment.connection_demo import ConnectionDemo
from msl.equipment.record_types import EquipmentRecord
from msl.equipment.resources.picotech.picoscope.picoscope import PicoScope
from msl.equipment.resources.picotech.picoscope.channel import PicoScopeChannel
class MyConnection(Connection):
def __init__(self, record):
super(MyConnection, self).__init__(record)
def get_none1(self):
"""No return type is specified."""
pass
def get_none2(self, channel):
"""This function takes 1 input but returns nothing.
Parameters
----------
channel : :obj:`str`
Some channel number
"""
pass
def get_bool1(self):
""":obj:`bool`: A boolean value."""
pass
def get_bool2(self):
"""Returns a boolean value.
Returns
-------
:obj:`bool`
A boolean value.
"""
pass
def get_string1(self):
""":obj:`str`: A string value."""
pass
def get_string2(self):
"""Returns a string value.
Returns
-------
:obj:`str`
A string value.
"""
pass
def get_bytes1(self):
""":obj:`bytes`: A bytes value."""
pass
def get_bytes2(self):
"""Returns a bytes value.
Returns
-------
:obj:`bytes`
A bytes value.
"""
pass
def get_int1(self):
""":obj:`int`: An integer value."""
pass
def get_int2(self):
"""Returns an integer value.
Returns
-------
:obj:`int`
An integer value.
"""
pass
def get_float1(self):
""":obj:`float`: A floating-point value."""
pass
def get_float2(self):
"""Returns a floating-point value.
Returns
-------
:obj:`float`
A floating-point value.
"""
pass
def get_list_of_bool1(self):
""":obj:`list` of :obj:`bool`: A list of boolean values."""
pass
def get_list_of_bool2(self):
"""A list of boolean values.
Returns
-------
:obj:`list` of :obj:`bool`
A list of boolean values.
"""
pass
def get_list_of_str1(self):
""":obj:`list` of :obj:`str`: A list of string values."""
pass
def get_list_of_str2(self):
"""A list of string values.
Returns
-------
:obj:`list` of :obj:`str`
A list of string values.
"""
pass
def get_list_of_bytes1(self):
""":obj:`list` of :obj:`bytes`: A list of bytes values."""
pass
def get_list_of_bytes2(self):
"""A list of bytes values.
Returns
-------
:obj:`list` of :obj:`bytes`
A list of bytes values.
"""
pass
def get_list_of_int1(self):
""":obj:`list` of :obj:`int`: A list of integer values."""
pass
def get_list_of_int2(self):
"""A list of integer values.
Returns
-------
:obj:`list` of :obj:`int`
A list of integer values.
"""
pass
def get_list_of_float1(self):
""":obj:`list` of :obj:`float`: A list of floating-point values."""
pass
def get_list_of_float2(self):
"""A list of floating-point values.
Returns
-------
:obj:`list` of :obj:`float`
A list of floating-point values.
"""
pass
def get_dict_of_bool1(self):
""":obj:`dict` of :obj:`bool`: A dictionary of boolean values."""
pass
def get_dict_of_bool2(self):
"""A dictionary of boolean values.
Returns
-------
:obj:`dict` of :obj:`bool`
A dictionary of boolean values.
"""
pass
def get_dict_of_str1(self):
""":obj:`dict` of :obj:`str`: A dictionary of string values."""
pass
def get_dict_of_str2(self):
"""A dictionary of string values.
Returns
-------
:obj:`dict` of :obj:`str`
A dictionary of string values.
"""
pass
def get_dict_of_bytes1(self):
""":obj:`dict` of :obj:`bytes`: A dictionary of bytes values."""
pass
def get_dict_of_bytes2(self):
"""A dictionary of bytes values.
Returns
-------
:obj:`dict` of :obj:`bytes`
A dictionary of bytes values.
"""
pass
def get_dict_of_int1(self):
""":obj:`dict` of :obj:`int`: A dictionary of integer values."""
pass
def get_dict_of_int2(self):
"""A dictionary of integer values.
Returns
-------
:obj:`dict` of :obj:`int`
A dictionary of integer values.
"""
pass
def get_dict_of_float1(self):
""":obj:`dict` of :obj:`float`: A dictionary of floating-point values."""
pass
def get_dict_of_float2(self):
"""A dictionary of floating-point values.
Returns
-------
:obj:`dict` of :obj:`float`
A dictionary of floating-point values.
"""
pass
def get_multiple1(self):
"""Many different data types.
Returns
-------
:obj:`str`
A string value.
:obj:`float`
A floating-point value.
:obj:`float`
A floating-point value.
:obj:`dict` of :obj:`int`
A dictionary of integer values.
:obj:`bytes`
A bytes value.
"""
pass
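# ConnectionDemo simulates the instrument: it parses each method's docstring
# (the ":obj:`...`" return annotations above) and fabricates a dummy value of
# the documented type, which is what the assertions below verify.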
def test_return_type_builtin():
demo = ConnectionDemo(EquipmentRecord(), MyConnection)
assert demo.get_none1() is None
assert demo.get_none2() is None
assert isinstance(demo.get_bool1(), bool)
assert isinstance(demo.get_bool2(), bool)
assert isinstance(demo.get_string1(), str)
assert isinstance(demo.get_string2(), str)
assert isinstance(demo.get_bytes1(), bytes)
assert isinstance(demo.get_bytes2(), bytes)
assert isinstance(demo.get_int1(), int)
assert isinstance(demo.get_int2(), int)
assert isinstance(demo.get_float1(), float)
assert isinstance(demo.get_float2(), float)
x = demo.get_list_of_bool1()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_bool2()
assert isinstance(x, list) and isinstance(x[0], bool)
x = demo.get_list_of_str1()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_str2()
assert isinstance(x, list) and isinstance(x[0], str)
x = demo.get_list_of_bytes1()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_bytes2()
assert isinstance(x, list) and isinstance(x[0], bytes)
x = demo.get_list_of_int1()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_int2()
assert isinstance(x, list) and isinstance(x[0], int)
x = demo.get_list_of_float1()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_list_of_float2()
assert isinstance(x, list) and isinstance(x[0], float)
x = demo.get_dict_of_bool1()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_bool2()
assert isinstance(x, dict) and isinstance(x['demo'], bool)
x = demo.get_dict_of_str1()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_str2()
assert isinstance(x, dict) and isinstance(x['demo'], str)
x = demo.get_dict_of_bytes1()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_bytes2()
assert isinstance(x, dict) and isinstance(x['demo'], bytes)
x = demo.get_dict_of_int1()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_int2()
assert isinstance(x, dict) and isinstance(x['demo'], int)
x = demo.get_dict_of_float1()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_dict_of_float2()
assert isinstance(x, dict) and isinstance(x['demo'], float)
x = demo.get_multiple1()
assert len(x) == 5
assert isinstance(x[0], str)
assert isinstance(x[1], float)
assert isinstance(x[2], float)
assert isinstance(x[3], dict) and isinstance(x[3]['demo'], int)
assert isinstance(x[4], bytes)
def test_return_type_object():
scope = ConnectionDemo(EquipmentRecord(), PicoScope)
x = scope.channel()
assert isinstance(x, dict) and x['demo'] == PicoScopeChannel
| mit | -131,559,629,135,997,980 | 23.797143 | 81 | 0.548911 | false | 3.742561 | false | false | false |
PushAMP/vcsserver | setup.py | 1 | 1100 | # coding=utf-8
"""
vcsserver-lib
-------------
vcsserver-lib is a library for easy and fast creation of an SSH daemon for popular VCSs (Mercurial and Git).
This library uses the Twisted framework."""
from setuptools import setup
setup(
name='vcsserver',
version='0.3.1',
url='https://bitbucket.org/3f17/vcssshd-lib',
license='BSD',
author='Dmitry Zhiltsov',
author_email='[email protected]',
    description='Library for easy and fast creation of an SSH daemon for popular VCSs (Mercurial and Git)',
long_description=__doc__,
#py_modules=['vcssshd'],
packages=['vcsserver'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Twisted', 'pycrypto', 'pyasn1'
],
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Framework :: Twisted',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| gpl-3.0 | 167,791,150,197,428,740 | 28.72973 | 106 | 0.631818 | false | 3.873239 | false | false | false |
vyscond/tspjson | tsp2json.py | 1 | 1084 | import pathlib
import json
import collections
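# A TSPLIB .tsp file starts with header lines such as "NAME : berlin52" or
# "DIMENSION : 52", followed by a NODE_COORD_SECTION of "index x y" rows; the
# parsing below assumes exactly five header lines before that section.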
for f in pathlib.Path('tsp').iterdir():
if f.is_file() and str(f)[-3:] == 'tsp' :
tsp = ''.join(open(str(f))).split('\n')
j = collections.OrderedDict()
for i in range(5):
key, val = tsp[i].split(':')
j[key.strip()] = val.strip()
# - Dict
j['NODE_COORD_SECTION'] = {}
print(tsp[5])
for coord in tsp[6:-2]:
            tmp = ' '.join(coord.split())  # collapse repeated whitespace between the columns
            index, x, y = tmp.split(' ')
j['NODE_COORD_SECTION'][index] = {'x': x, 'y': y}
with open('dict/' + f.name + '.json', 'w') as f2:
f2.write(json.dumps(j, indent=4))
# - List
j['NODE_COORD_SECTION'] = []
for coord in tsp[6:-2]:
            coord = ' '.join(coord.split())  # collapse repeated whitespace between the columns
            index, x, y = coord.split(' ')
j['NODE_COORD_SECTION'].append({'x': x, 'y': y})
with open('list/' + f.name + '.json', 'w') as f2:
f2.write(json.dumps(j, indent=4))
| mit | 8,349,134,594,298,226,000 | 36.37931 | 69 | 0.458487 | false | 3.235821 | false | false | false |
ipanova/pulp_puppet | pulp_puppet_plugins/pulp_puppet/plugins/migrations/0002_puppet_publishing_directory_change.py | 3 | 1947 | # -*- coding: utf-8 -*-
# Migration script to move published repositories to the new location.
#
# Copyright © 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import logging
import os
import shutil
_log = logging.getLogger('pulp')
OLD_PUBLISH_ROOT_DIR = '/var/www'
OLD_PUPPET_PUBLISH_DIR_NAME = 'pulp_puppet'
NEW_PUBLISH_ROOT_DIR = '/var/lib/pulp/published'
NEW_PUPPET_PUBLISH_DIR_NAME = 'puppet'
def migrate(*args, **kwargs):
"""
Move files from old publish directories to the new location.
"""
old_puppet_publish_dir = os.path.join(OLD_PUBLISH_ROOT_DIR, OLD_PUPPET_PUBLISH_DIR_NAME)
new_puppet_publish_dir = os.path.join(NEW_PUBLISH_ROOT_DIR, NEW_PUPPET_PUBLISH_DIR_NAME)
if os.path.exists(old_puppet_publish_dir) and os.listdir(old_puppet_publish_dir):
# Move contents of '/var/www/pulp_puppet' into '/var/lib/pulp/published/puppet'
move_directory_contents(old_puppet_publish_dir, new_puppet_publish_dir)
_log.info("Migrated published puppet repositories to the new location")
def move_directory_contents(src_dir, dest_dir):
"""
Move everything in src_dir to dest_dir
"""
# perform the move. /var/lib/pulp/published/puppet already exists so we
# need to move like this (i.e, we can't use shutil.copytree). This should
# leave an empty /var/www/pulp_puppet dir.
for entry in os.listdir(src_dir):
shutil.move(os.path.join(src_dir, entry), os.path.join(dest_dir, entry))
| gpl-2.0 | 7,662,015,652,387,057,000 | 41.304348 | 92 | 0.718397 | false | 3.281619 | false | false | false |
maweigert/spimagine | tests/test_rendering/volrender_tester.py | 1 | 2633 |
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import shutil
import numpy as np
from PyQt5 import QtGui, QtCore, QtWidgets
from spimagine.gui.mainwidget import MainWidget
from spimagine.models.data_model import DataModel, NumpyData
from gputools import OCLProgram
CACHEDIRS = ["~/.nv/ComputeCache","~/.cache/pyopencl/pyopencl-compiler-cache-v2-py2.7.6.final.0"]
CACHEDIRS = [os.path.expanduser(_C) for _C in CACHEDIRS]
import spimagine
def absPath(myPath):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath)
class MyWidget(MainWidget):
def __init__(self):
super(MyWidget,self).__init__()
self.compileTimer = QtCore.QTimer(self)
self.compileTimer.setInterval(1000)
self.compileTimer.timeout.connect(self.on_compile_timer)
self.compileTimer.start()
def on_compile_timer(self):
for c in CACHEDIRS:
if os.path.exists(c):
print("removing cache: ", c)
shutil.rmtree(c)
print("compiling...")
try:
dirname = os.path.dirname(spimagine.volumerender.__file__)
proc = OCLProgram(os.path.join(dirname,"kernels/volume_kernel.cl"),
build_options =
["-cl-fast-relaxed-math",
"-cl-unsafe-math-optimizations",
"-cl-mad-enable",
"-I %s" %os.path.join(dirname,"kernels/"),
"-D maxSteps=%s"%spimagine.config.__DEFAULTMAXSTEPS__]
)
self.glWidget.renderer.proc = proc
self.glWidget.refresh()
print(np.amin(self.glWidget.output),np.amax(self.glWidget.output))
except Exception as e:
print(e)
if __name__ == '__main__':
x = np.linspace(-1,1,128)
Z,Y,X = np.meshgrid(x,x,x)
R1 = np.sqrt((X+.2)**2+(Y+.2)**2+(Z+.2)**2)
R2 = np.sqrt((X-.2)**2+(Y-.2)**2+(Z-.2)**2)
d = np.exp(-10*R1**2)+np.exp(-10*R2**2)
app = QtWidgets.QApplication(sys.argv)
win = MyWidget()
win.setModel(DataModel(NumpyData(d)))
win.show()
win.raise_()
sys.exit(app.exec_())
| bsd-3-clause | -9,065,273,240,665,425,000 | 25.59596 | 97 | 0.566274 | false | 3.524766 | false | false | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/aio/operations/_dedicated_hosts_operations.py | 1 | 28323 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DedicatedHostsOperations:
"""DedicatedHostsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHost",
**kwargs: Any
) -> "_models.DedicatedHost":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHost')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHost",
**kwargs: Any
) -> AsyncLROPoller["_models.DedicatedHost"]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host .
:type host_name: str
:param parameters: Parameters supplied to the Create Dedicated Host.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHost
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_06_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHostUpdate",
**kwargs: Any
) -> "_models.DedicatedHost":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DedicatedHostUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: "_models.DedicatedHostUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.DedicatedHost"]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host .
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHostUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DedicatedHost or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_06_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a dedicated host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host.
:type host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
async def get(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
expand: Optional[str] = "instanceView",
**kwargs: Any
) -> "_models.DedicatedHost":
"""Retrieves information about a dedicated host.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:param host_name: The name of the dedicated host.
:type host_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHost, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_06_01.models.DedicatedHost
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHost"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'hostName': self._serialize.url("host_name", host_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DedicatedHost', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}'} # type: ignore
def list_by_host_group(
self,
resource_group_name: str,
host_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DedicatedHostListResult"]:
"""Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink
property in the response to get the next page of dedicated hosts.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group.
:type host_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHostListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_06_01.models.DedicatedHostListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DedicatedHostListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_host_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('DedicatedHostListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_host_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts'} # type: ignore
| mit | -5,693,794,811,606,255,000 | 49.129204 | 206 | 0.642481 | false | 4.248238 | true | false | false |
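A rough sketch of how a caller might drive the begin_* pollers defined above, using the multi-API async client from the same SDK; the credential setup, subscription ID, resource names and SKU below are placeholders, not values taken from this file:

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.compute.aio import ComputeManagementClient
from azure.mgmt.compute.v2020_06_01.models import DedicatedHost, Sku


async def create_host():
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, "<subscription-id>") as client:
            # begin_create_or_update returns an AsyncLROPoller; result() waits
            # for the long-running operation to finish.
            poller = await client.dedicated_hosts.begin_create_or_update(
                resource_group_name="my-rg",
                host_group_name="my-host-group",
                host_name="my-host",
                parameters=DedicatedHost(location="eastus", sku=Sku(name="DSv3-Type1")),
            )
            host = await poller.result()
            print(host.name, host.provisioning_state)


asyncio.run(create_host())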
rishubil/sqlalchemy-fulltext-search | setup.py | 1 | 1279 | """
SQLAlchemy FullText Search
"""
from setuptools import setup, Command
setup(
name='SQLAlchemy-FullText-Search',
version='0.2.3',
url='https://github.com/mengzhuo/sqlalchemy-fulltext-search',
license='BSD',
author='Meng Zhuo, Alejandro Mesa',
author_email='[email protected], [email protected]',
    description=('Provide FullText for MySQL & SQLAlchemy model'),
long_description = __doc__,
packages=['sqlalchemy_fulltext'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=['SQLAlchemy>=0.8',],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules' ]
)
| mit | -265,112,061,610,332,200 | 38.96875 | 99 | 0.52932 | false | 4.702206 | false | false | false |
erikryverling/twinkle-client | twinkle-client/client.py | 1 | 3369 | import sys
import getopt
import platform
import subprocess
from twisted.internet import reactor
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
STAR_REQUEST_TIMEOUT_IN_SECONDS = 3
DEFAULT_URL = "ws://localhost:9000"
STAR_REQUEST_MESSAGE = "twinkle:star"
ERROR_MESSAGE = "twinkle:error"
STARRED_MESSAGE = "twinkle:starred"
def _enum(**enums):
return type('Enum', (), enums)
Sound = _enum(SUCCESS = 1, FAIL = 2)
muted = False
class ClientProtocol(WebSocketClientProtocol):
def __init__(self):
self.has_recived_starred_message = False
def onOpen(self):
self.sendMessage(STAR_REQUEST_MESSAGE)
reactor.callLater(STAR_REQUEST_TIMEOUT_IN_SECONDS, self.timeout)
def onClose(self, wasClean, code, reason):
if not wasClean:
terminateAbnormally(reason)
elif self.has_recived_starred_message == False:
terminateAbnormally("Star request failed")
else:
reactor.stop()
def onMessage(self, message, binary):
if message == ERROR_MESSAGE:
self.sendClose()
elif message == STARRED_MESSAGE:
self.has_recived_starred_message = True
print("Star request succeeded")
if not muted:
playSound(Sound.SUCCESS)
self.sendClose()
def timeout(self):
if not self.has_recived_starred_message:
print("Timeout while waiting for star request's response")
self.sendClose()
class ClientFactory(WebSocketClientFactory):
def __init__(self, url):
WebSocketClientFactory.__init__(self, url)
def clientConnectionFailed(self, connector, reason):
terminateAbnormally(reason)
def terminateAbnormally(reason):
print(reason)
if not muted:
playSound(Sound.FAIL)
reactor.stop()
def playSound(sound):
if (sound == Sound.SUCCESS):
audioFile = "success.wav"
elif (sound == Sound.FAIL):
audioFile = "fail.wav"
else:
raise Exception("Unknown sound state")
try:
playCommand = getPlayCommand(audioFile)
subprocess.call(playCommand, shell=True)
except Exception as e:
print(e)
def getPlayCommand(filePath):
system = platform.system()
if system == "Linux":
return "aplay " + filePath
# TODO Needs testing
elif system == "Darwin":
return "afplay " + filePath
elif system == "Windows":
return "powershell -c (New-Object Media.SoundPlayer \"" + filePath + "\").PlaySync();"
else:
raise Exception("Could not identify platform while trying to play audio")
if __name__ == '__main__':
url = DEFAULT_URL
argv = sys.argv[1:]
try:
opts, args = getopt.getopt(argv,"mh", ["url=", "mute-sound", "help"])
except getopt.GetoptError:
print('twinkle [--url=<url to web socket server>] [-m|--mute-sound] [-h|--help]')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('twinkle [--url=<url to web socket server>] [-m|--mute-sound] [-h|--help]')
sys.exit()
elif opt in ("--url"):
url = arg
elif opt in ("-m", "--mute-sound"):
muted = True
factory = ClientFactory(url)
factory.protocol = ClientProtocol
connectWS(factory)
reactor.run()
| mit | -2,050,359,643,606,386,400 | 28.043103 | 94 | 0.620659 | false | 3.859107 | false | false | false |
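For manual testing of the client above, a minimal server along the following lines answers the star request with the confirmation message the client waits for. This is only an assumed counterpart written against the same legacy autobahn API the client imports, not the actual Twinkle server:

from twisted.internet import reactor
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS


class FakeTwinkleProtocol(WebSocketServerProtocol):
    def onMessage(self, message, binary):
        # Reply with the "starred" confirmation the client expects, or an error.
        if message == "twinkle:star":
            self.sendMessage("twinkle:starred")
        else:
            self.sendMessage("twinkle:error")


if __name__ == '__main__':
    factory = WebSocketServerFactory("ws://localhost:9000")
    factory.protocol = FakeTwinkleProtocol
    listenWS(factory)
    reactor.run()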
PuchatekwSzortach/names_database_builder | names/main.py | 1 | 1461 | # -*- coding: utf-8 -*-
import logging
import sys
import sqlalchemy.orm
import database
import structures
def get_names_from_file(path):
with open(path) as file:
data_lines = file.read().splitlines()
names = []
for name_line in data_lines:
fields = name_line.split(",")
japanese_name = structures.JapaneseName(kanji=fields[0], hiragana=fields[1], gender=fields[2])
names.append(japanese_name)
return names
if __name__ == "__main__":
logger = logging.getLogger('names')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.info('Reading raw data')
names = get_names_from_file("./raw_names.txt")
logger.info('Filtering out duplicates')
unique_names = set(names)
logger.info('Filtering out invalid names')
validator = structures.JapaneseNamesValidator()
valid_names = [name for name in unique_names if validator.is_name_valid(name)]
logger.info('Sorting names')
sorted_names = sorted(list(valid_names))
logger.info('Populating database')
Session = sqlalchemy.orm.sessionmaker(bind=database.engine)
session = Session()
# Define genders
boy = database.Gender(gender=u'男')
girl = database.Gender(gender=u'女')
session.add_all([boy, girl])
session.commit()
name_entries = [database.Name(name) for name in sorted_names]
session.add_all(name_entries)
session.commit()
| mit | 7,396,839,309,927,074,000 | 22.885246 | 102 | 0.671242 | false | 3.606436 | false | false | false |
arnaldorusso/PyPAM | PyPAM/etr_models.py | 1 | 6198 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from rpy import r
def platt(light, etr, ini=None):
"""
Adjust a curve of best fit, following the Platt model.
Parameters
----------
light : arr
        Generally PAR (Photosynthetically Active Radiation) values, the
        light levels that drive primary production.
    etr : arr
        Electron transport rate (relative ETR), obtained from
        Rapid Light Curves.
ini : List
optional intial values for optimization proccess.
Returns
-------
iniR : arr
Initial values modeled, with R `optim` function.
opts : arr
Curve adjusted with ETR values modeled.
pars : arr
Curve parameters (alpha, Ek, ETRmax)
See Also
--------
    T. Platt, C.L. Gallegos and W.G. Harrison, 1980. Photoinhibition of
photosynthesis in natural assemblages of marine phytoplankton. Journal
of Marine Research, 38:4, 687-701.
"""
opts = []
pars = []
r.assign("x", light[~np.isnan(light)])
r.assign("y", etr[~np.isnan(etr)])
if ini is None:
r.assign('ini', [0.4, 1.5, 1500])
else:
r.assign('ini', np.array(ini))
min_platt = r("""
platt<- function(params){
alpha<-params[1]
Beta<- params[2]
Ps<- params[3]
return( sum( (y-Ps*(1-exp(-alpha*x/Ps))*exp(-Beta*x/Ps))^2))
} """)
min_adp = r("""
min_ad<-function(params){
alpha<-params[1]
Beta<-params[2]
Ps<-params[3]
return( ( (Ps*(1-exp(-alpha*x/Ps)) *exp(-Beta*x/Ps)) ) )
}""")
r('etr_sim<-optim(par=ini, fn=platt)')
r('p_alpha<-etr_sim$par[1]')
r('p_Beta<-etr_sim$par[2]')
r('p_Ps2<-etr_sim$par[3]')
r('''
if (p_Beta==0 | p_Beta<0){
p_etrmax<-p_Ps2
}else {
p_etrmax<-p_Ps2*(p_alpha/(p_alpha+p_Beta))*
(p_Beta/(p_alpha+p_Beta))^(p_Beta/p_alpha)
}
p_Ek<-p_etrmax/p_alpha
''')
iniR = r('etr_sim$par')
opts = np.append(opts, r('min_ad(par = etr_sim$par)'))
cpars = r('as.data.frame(cbind(p_alpha, p_Ek, p_etrmax))')
pars = [cpars['p_alpha'], cpars['p_Ek'], cpars['p_etrmax']]
return iniR, opts, pars
def platt_opts(light, params):
"""
    Compute modeled ETR (`opt`) values for the given PAR levels following the Platt model.
Parameters
----------
light : arr
        Generally PAR (Photosynthetically Active Radiation) values, the
        light levels that drive primary production.
params: arr
Containing values of (alpha, Beta, etrmax).
Returns
-------
opts : arr
        Modeled curve values for the given `params` and list of PAR levels.
"""
opts = []
r.assign("light", light[~np.isnan(light)])
r.assign("params", params)
# if opt == None:
# r.assign("opt", light[~np.isnan(light)])
# else:
# r.assign("opt", opt[~np.isnan(opt)])
# if ini == None:
# r.assign('ini', [0.4,1.5,1500])
# else:
# r.assign('ini', np.array(ini))
# op, platt_param = platt(light,etr, ini=ini)
# r.assign('platt_param', platt_param)
min_opt = r("""
min_opt<-function(light,params){
alpha<-params[1]
Beta<-params[2]
Ps<-params[3]
return( ( (Ps*(1-exp(-alpha*light/Ps)) *exp(-Beta*light/Ps)) ) )
}""")
opts = np.append(opts, r('min_opt(light, params)'))
return opts
def eilers_peeters(light, etr, ini=None):
"""
    Adjust a best-fit curve to E-vs-P (light versus photosynthesis) curves,
    according to the Eilers & Peeters model.
Parameters
----------
light : arr
        Generally PAR (Photosynthetically Active Radiation) values, the
        light levels that drive primary production.
    etr : arr
        Electron transport rate (relative ETR), obtained from
        Rapid Light Curves.
    ini : None
        Initial values to seed the curve fit.
        If given, they must be a list of the initial parameters
        (a, b, c) of the Eilers-Peeters model.
Return
------
iniR : arr
        Initial parameter values, from the R `nls2` grid search or passed through from `ini`.
opts : arr
Values optimized
params : arr
Curve Parameters (alpha, Ek, ETR_max)
See Also
--------
P.H.C. Eilers and J.C.H Peeters. 1988. A model for the relationship
between the light intensity and the rate of photosynthesis in
phytoplankton. Ecol. Model. 42:199-215.
#TODO
## Implement minimisation in Python.
## It's not very clear how to apply `nls2` in Python.
## minimize from a list of initial values.
##a = varis[0]
##b = varis[1]
##c = varis[2]
#a = mini['a']
#b = mini['b']
#c = mini['c']
#opts = (light/(a*(light**2)+(b*light)+c))
#ad = fmin(ep_minimize,varis,args=(light,etr))
#alpha = (1./ad[2])
#etrmax = 1./(ad[1]+2*(ad[0]*ad[2])**0.5)
#Eopt = (ad[2]/ad[0])**0.5
#Ek = etrmax/alpha
#params = [alpha, Ek, etrmax, Eopt]
"""
r('library(nls2)')
r.assign("x", light[~np.isnan(light)])
r.assign("y", etr[~np.isnan(etr)])
r('dat<-as.data.frame(cbind(x,y))')
r('names(dat)<-c("light","etr")')
if ini is None:
r('''grid<-expand.grid(list(a=seq(1e-07,9e-06,by=2e-07),
b=seq(-0.002,0.006,by=0.002),c=seq(-6,6,by=2)))''')
mini = r('''
mini<-coefficients(nls2(etr~light/(a*light^2+b*light+c),
data=dat, start=grid, algorithm="brute-force"))
''')
else:
mini = ini
r.assign("mini", mini)
r('''ep<-nls(etr~light/(a*light^2+b*light+c),data=dat,
start=list(a=mini[1],b=mini[2],c=mini[3]),
lower = list(0,-Inf,-Inf), trace=FALSE,
algorithm = "port", nls.control("maxiter"=100000, tol=0.15))
a2<-summary(ep)$coefficients[1]
b2<-summary(ep)$coefficients[2]
c2<-summary(ep)$coefficients[3]
alpha<-(1/c2)
etrmax<-1/(b2+2*(a2*c2)^0.5)
Eopt<-(c2/a2)^0.5
Ek<-etrmax/alpha''')
iniR = mini
alpha = r('alpha')
Ek = r('Ek')
etr_max = r('etrmax')
params = [alpha, Ek, etr_max]
opts = r('opts<-fitted(ep)')
return iniR, opts, params
| mit | -5,889,686,842,290,081,000 | 25.715517 | 78 | 0.556308 | false | 2.947218 | false | false | false |
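For reference, the least-squares objective minimised by platt() above and the quantities it derives follow the Platt et al. (1980) formulation; written in LaTeX (E is the light level, and the beta <= 0 branch in the embedded R code simply uses ETRmax = Ps):

\mathrm{ETR}(E) = P_s \left(1 - e^{-\alpha E / P_s}\right) e^{-\beta E / P_s},
\qquad
\mathrm{ETR}_{\max} = P_s \frac{\alpha}{\alpha + \beta}
    \left(\frac{\beta}{\alpha + \beta}\right)^{\beta/\alpha},
\qquad
E_k = \mathrm{ETR}_{\max} / \alpha .

These are exactly the p_etrmax and p_Ek values assembled in the R snippet and returned in pars.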
smilix/TracMoinMoinAuth | tracmoinmoinauth/moinmoin_user_dir.py | 1 | 3917 | # -*- coding: utf-8 -*-
#
# Manages user accounts stored in MoinMoin user directory.
# Author: [email protected]
from os import listdir, stat
from os.path import join, exists
import re
from passlib.context import CryptContext
class MoinMoinUserDir():
USER_FILE_RE = re.compile(r'^[0-9\.]+$')
def __init__(self, logger, mm_user_dir, disable_cache):
if mm_user_dir is None:
raise ValueError('No "mm_user_dir" configuration.')
if not exists(mm_user_dir):
raise ValueError('mm_user_dir "%s" doesn`t exist!' % mm_user_dir)
self._crypt_context = CryptContext(
# is the default value in the MoinMoin wiki
schemes=['sha512_crypt', ]
)
self._log = logger
self._mm_user_dir = mm_user_dir
self._disable_cache = disable_cache
self._user_cache = None
self._user_cache_check = None
def get_users(self):
users = self._list_users_and_pw()
user_list = []
for name in users:
user_list.append(name)
return user_list
def check_password(self, user, password):
users = self._list_users_and_pw()
for name in users:
if name == user:
pw_correct = self._crypt_context.verify(password, users[name])
self._log.info('User %s found, pw check success: %s' % (name, pw_correct))
return pw_correct
return None
def _list_users_and_pw(self):
if not self._must_read_again():
return self._user_cache
self._log.debug('read user data again')
users = {}
for user_file in listdir(self._mm_user_dir):
if self.USER_FILE_RE.match(user_file) is None:
continue
(name, password) = self._get_name_and_password(user_file)
if name is None:
continue
name = name.decode('utf8')
users[name] = password
self._user_cache = users
return users
def _must_read_again(self):
if self._disable_cache:
return True
if self._user_cache is None or self._user_cache_check is None:
self._user_cache_check = self._get_dir_check_value()
return True
new_check = self._get_dir_check_value()
if new_check == self._user_cache_check:
return False
self._user_cache_check = new_check
return True
def _get_dir_check_value(self):
(mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = stat(self._mm_user_dir)
return '%s-%s-%s-%s' % (size, atime, mtime, ctime)
def _get_name_and_password(self, file_name):
name_prefix = 'name='
pw_prefix = 'enc_password='
scheme_prefix = '{PASSLIB}'
name, password = None, None
with open(join(self._mm_user_dir, file_name), "r") as file:
for line in file:
if line.startswith(name_prefix):
# remove prefix and newline
name = line[len(name_prefix):len(line) - 1]
elif line.startswith(pw_prefix):
# remove prefix and newline
password = line[len(pw_prefix):len(line) - 1]
# check for passlib prefix
if not password.startswith(scheme_prefix):
self._log.warn('Unsupported scheme prefix. User "%s" won\'t login.' % file_name.encode('utf8', 'ignore'))
return None, None
# remove the scheme prefix
password = password[len(scheme_prefix):]
if name is not None and password is not None:
return name, password
self._log.warn('No %s and %s entries found for file %s.' % (name_prefix, pw_prefix, file_name.encode('utf8', 'ignore')))
return None, None
| mit | -4,732,485,343,434,082,000 | 33.06087 | 129 | 0.551698 | false | 3.882061 | false | false | false |
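A small sketch of the on-disk layout the class above expects, with made-up names and passwords. MoinMoin keeps one numerically named file per user, and the hash uses the same passlib sha512_crypt scheme configured above; this assumes passlib >= 1.7 (for CryptContext.hash) and that MoinMoinUserDir is importable from the module above:

import logging
import os
import tempfile

from passlib.context import CryptContext

user_dir = tempfile.mkdtemp()
pw_hash = CryptContext(schemes=['sha512_crypt']).hash('secret')
# One user file, named like MoinMoin's timestamp-based IDs.
with open(os.path.join(user_dir, '1355563265.37.12345'), 'w') as f:
    f.write('name=alice\n')
    f.write('enc_password={PASSLIB}' + pw_hash + '\n')

auth = MoinMoinUserDir(logging.getLogger(__name__), user_dir, disable_cache=True)
print(auth.get_users())                        # ['alice']
print(auth.check_password('alice', 'secret'))  # True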
otsuarez/beeping_exporter | files/beeping_exporter.py | 1 | 7851 | #!/usr/bin/env python
import re
import time
import requests
import argparse
from pprint import pprint
import ast
import os
from sys import exit
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
DEBUG = int(os.environ.get('DEBUG', '0'))
def must_env(var):
val = os.environ.get(var)
if val is None:
raise Exception('Error reading token from environment (%s)' % var)
return val
def load_env():
global BEEPING_SERVER
global BEEPING_METRICS_PORT
global BEEPING_CHECKS
BEEPING_SERVER = must_env('BEEPING_SERVER')
BEEPING_METRICS_PORT = must_env('BEEPING_METRICS_PORT')
BEEPING_CHECKS = must_env('BEEPING_CHECKS')
class BeepingCollector(object):
# The sites we want to monitor.
sites = ast.literal_eval(must_env('BEEPING_CHECKS'))
def __init__(self, target):
self._target = target.rstrip("/")
def collect(self):
sites_data = self._request_data()
self._setup_empty_prometheus_metrics()
for site in self.sites:
if DEBUG:
print "working with site: %s" % site
pprint(sites_data[site])
self._get_metrics(site,sites_data[site])
if DEBUG:
print "_prometheus_metrics"
pprint(self._prometheus_metrics)
for metric in self._prometheus_metrics.values():
yield metric
def _request_data(self):
# Request the information we need from Beeping
beeping_url = '{0}/check'.format(self._target) # @TODO no need for the format i think
if DEBUG:
print "_request_data >> beeping_url: %s" % beeping_url
def queryBeeping(myurl):
result = {}
for site in self.sites:
result[site] = {}
data = {}
params = self.sites[site]
response = requests.post(myurl, json=params)
if response.status_code != requests.codes.ok:
return[]
data = response.json()
result[site] = data
return result
return queryBeeping(beeping_url)
def _setup_empty_prometheus_metrics(self):
# The metrics we want to export.
self._prometheus_metrics = {}
self._prometheus_metrics = {
'dns_lookup':
GaugeMetricFamily('beeping_dns_lookup',
'site dns_lookup in seconds', labels=["site"]),
'tcp_connection':
GaugeMetricFamily('beeping_tcp_connection',
'site tcp_connection in seconds', labels=["site"]),
'tls_handshake':
GaugeMetricFamily('beeping_tls_handshake',
'site tls_handshake in seconds', labels=["site"]),
'server_processing':
GaugeMetricFamily('beeping_server_processing',
'site server_processing in seconds', labels=["site"]),
'content_transfer':
GaugeMetricFamily('beeping_content_transfer',
'site content_transfer in seconds', labels=["site"]),
'http_request_time':
GaugeMetricFamily('beeping_http_request_time_seconds',
'site http_request_time in seconds', labels=["site"]),
'http_status_code':
GaugeMetricFamily('beeping_http_status_code',
'site http_status_code', labels=["site"]),
'http_body_pattern':
GaugeMetricFamily('beeping_http_body_pattern',
'site http_body_pattern found', labels=["site"]),
'timeline_name_lookup':
GaugeMetricFamily('beeping_timeline_name_lookup',
'site timeline name_lookup in seconds', labels=["site"]),
'timeline_connect':
GaugeMetricFamily('beeping_timeline_connect',
'site timeline connect in seconds', labels=["site"]),
'timeline_pretransfer':
GaugeMetricFamily('beeping_timeline_pretransfer',
'site timeline pretransfer in seconds', labels=["site"]),
'timeline_starttransfer':
GaugeMetricFamily('beeping_timeline_starttransfer',
'site timeline starttransfer in seconds', labels=["site"]),
'ssl_cert_expiry_days_left':
GaugeMetricFamily('beeping_ssl_cert_expiry_days_left',
'ssl cert expiry days left', labels=["site"]),
}
def _get_metrics(self, site, site_data):
if DEBUG:
print "====== get_metrics checking site: "+site
print site_data.get('http_status_code')
if site_data.get('http_status_code', 0):
self._prometheus_metrics['http_status_code'].add_metric([site], site_data.get('http_status_code'))
if site_data.get('http_body_pattern'):
http_body_pattern_value = 1
else:
http_body_pattern_value = 0
self._prometheus_metrics['http_body_pattern'].add_metric([site], http_body_pattern_value)
# metrics
self._prometheus_metrics['dns_lookup'].add_metric([site], site_data.get('dns_lookup'))
self._prometheus_metrics['tcp_connection'].add_metric([site], site_data.get('tcp_connection'))
if site_data.get('tls_handshake', 0):
self._prometheus_metrics['tls_handshake'].add_metric([site], site_data.get('tls_handshake'))
self._prometheus_metrics['server_processing'].add_metric([site], site_data.get('server_processing'))
self._prometheus_metrics['content_transfer'].add_metric([site], site_data.get('content_transfer'))
self._prometheus_metrics['http_request_time'].add_metric([site], site_data.get('http_request_time'))
# timeline data
self._prometheus_metrics['timeline_name_lookup'].add_metric([site], site_data.get('timeline',0).get('name_lookup',0))
self._prometheus_metrics['timeline_connect'].add_metric([site], site_data.get('timeline',0).get('connect',0))
self._prometheus_metrics['timeline_pretransfer'].add_metric([site], site_data.get('timeline',0).get('pretransfer',0))
self._prometheus_metrics['timeline_starttransfer'].add_metric([site], site_data.get('timeline',0).get('starttransfer',0))
# ssl
if site_data.get('ssl'):
self._prometheus_metrics['ssl_cert_expiry_days_left'].add_metric([site], site_data.get('ssl').get('cert_expiry_days_left'))
def parse_args():
parser = argparse.ArgumentParser(
description='beeping exporter args beeping address and port'
)
parser.add_argument(
'-j', '--beeping',
metavar='beeping',
required=False,
help='server url from the beeping api',
default=os.environ.get('BEEPING_SERVER', 'http://localhost:8080')
)
parser.add_argument(
'-p', '--port',
metavar='port',
required=False,
type=int,
help='Listen to this port',
default=int(os.environ.get('BEEPING_METRICS_PORT', '9118'))
)
return parser.parse_args()
BEEPING_SERVER = None
BEEPING_METRICS_PORT = None
BEEPING_CHECKS = None
def main():
try:
load_env()
args = parse_args()
port = int(args.port)
REGISTRY.register(BeepingCollector(args.beeping))
start_http_server(port)
print "Polling %s. Serving at port: %s" % (args.beeping, port)
while True:
time.sleep(1)
except KeyboardInterrupt:
print(" Interrupted")
exit(0)
if __name__ == "__main__":
main()
| mit | -9,041,899,436,335,090,000 | 39.056122 | 135 | 0.575723 | false | 4.044822 | false | false | false |
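The exporter above reads its configuration from environment variables, and BEEPING_CHECKS is parsed with ast.literal_eval, so it must be a Python dict literal mapping a check name to the JSON body POSTed to BeePing's /check endpoint. The check field names below ("url", "pattern", "timeout") are assumptions about the BeePing API, not something defined in this file:

import os

os.environ["BEEPING_SERVER"] = "http://localhost:8080"
os.environ["BEEPING_METRICS_PORT"] = "9118"
os.environ["BEEPING_CHECKS"] = str({
    "example_home": {"url": "https://example.org", "timeout": 10},
    "example_api": {"url": "https://example.org/api", "pattern": "ok"},
})

With those variables set, running the script starts a metrics endpoint on port 9118 that Prometheus can scrape.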
google/tink | examples/python/streaming_aead/streaming_aead.py | 1 | 5686 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START streaming-aead-example]
"""A command-line utility for using streaming AEAD for a file.
It loads cleartext keys from disk - this is not recommended!
It requires 4 arguments (and one optional one):
mode: either 'encrypt' or 'decrypt'
keyset_path: name of the file with the keyset to be used for encryption or
decryption
input_path: name of the file with the input data to be encrypted or decrypted
output_path: name of the file to write the ciphertext respectively plaintext
to
[optional] associated_data: the associated data used for encryption/decryption
provided as a string.
"""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
from typing import BinaryIO
# Special imports
from absl import app
from absl import flags
from absl import logging
import tink
from tink import cleartext_keyset_handle
from tink import streaming_aead
FLAGS = flags.FLAGS
BLOCK_SIZE = 1024 * 1024 # The CLI tool will read/write at most 1 MB at once.
flags.DEFINE_enum('mode', None, ['encrypt', 'decrypt'],
'Selects if the file should be encrypted or decrypted.')
flags.DEFINE_string('keyset_path', None,
'Path to the keyset used for encryption or decryption.')
flags.DEFINE_string('input_path', None, 'Path to the input file.')
flags.DEFINE_string('output_path', None, 'Path to the output file.')
flags.DEFINE_string('associated_data', None,
'Associated data used for the encryption or decryption.')
def read_as_blocks(file: BinaryIO):
"""Generator function to read from a file BLOCK_SIZE bytes.
Args:
file: The file object to read from.
Yields:
Returns up to BLOCK_SIZE bytes from the file.
"""
while True:
data = file.read(BLOCK_SIZE)
# If file was opened in rawIO, EOF is only reached when b'' is returned.
# pylint: disable=g-explicit-bool-comparison
if data == b'':
break
# pylint: enable=g-explicit-bool-comparison
yield data
def encrypt_file(input_file: BinaryIO, output_file: BinaryIO,
associated_data: bytes,
primitive: streaming_aead.StreamingAead):
"""Encrypts a file with the given streaming AEAD primitive.
Args:
input_file: File to read from.
output_file: File to write to.
associated_data: Associated data provided for the AEAD.
primitive: The streaming AEAD primitive used for encryption.
"""
with primitive.new_encrypting_stream(output_file,
associated_data) as enc_stream:
for data_block in read_as_blocks(input_file):
enc_stream.write(data_block)
def decrypt_file(input_file: BinaryIO, output_file: BinaryIO,
associated_data: bytes,
primitive: streaming_aead.StreamingAead):
"""Decrypts a file with the given streaming AEAD primitive.
This function will cause the program to exit with 1 if the decryption fails.
Args:
input_file: File to read from.
output_file: File to write to.
associated_data: Associated data provided for the AEAD.
primitive: The streaming AEAD primitive used for decryption.
"""
try:
with primitive.new_decrypting_stream(input_file,
associated_data) as dec_stream:
for data_block in read_as_blocks(dec_stream):
output_file.write(data_block)
except tink.TinkError as e:
logging.exception('Error decrypting ciphertext: %s', e)
exit(1)
def main(argv):
del argv
associated_data = b'' if not FLAGS.associated_data else bytes(
FLAGS.associated_data, 'utf-8')
# Initialise Tink.
try:
streaming_aead.register()
except tink.TinkError as e:
logging.exception('Error initialising Tink: %s', e)
return 1
# Read the keyset into a keyset_handle.
with open(FLAGS.keyset_path, 'rt') as keyset_file:
try:
text = keyset_file.read()
keyset_handle = cleartext_keyset_handle.read(tink.JsonKeysetReader(text))
except tink.TinkError as e:
logging.exception('Error reading key: %s', e)
return 1
# Get the primitive.
try:
streaming_aead_primitive = keyset_handle.primitive(
streaming_aead.StreamingAead)
except tink.TinkError as e:
logging.exception('Error creating streaming AEAD primitive from keyset: %s',
e)
return 1
# Encrypt or decrypt the file.
with open(FLAGS.input_path, 'rb') as input_file:
with open(FLAGS.output_path, 'wb') as output_file:
if FLAGS.mode == 'encrypt':
encrypt_file(input_file, output_file, associated_data,
streaming_aead_primitive)
elif FLAGS.mode == 'decrypt':
decrypt_file(input_file, output_file, associated_data,
streaming_aead_primitive)
if __name__ == '__main__':
flags.mark_flag_as_required('mode')
flags.mark_flag_as_required('keyset_path')
flags.mark_flag_as_required('input_path')
flags.mark_flag_as_required('output_path')
app.run(main)
# [END streaming-aead-example]
| apache-2.0 | 2,217,489,472,260,567,300 | 33.047904 | 80 | 0.688181 | false | 3.803344 | false | false | false |
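A short programmatic sketch of the same streaming encrypt/decrypt flow the CLI above implements, using an in-memory generated keyset instead of a keyset file; the key template name is an assumption about Tink's Python API:

import os
import tempfile

import tink
from tink import streaming_aead

streaming_aead.register()
# AES256_GCM_HKDF_4KB is assumed to be one of Tink's streaming AEAD key templates.
handle = tink.new_keyset_handle(
    streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB)
primitive = handle.primitive(streaming_aead.StreamingAead)

workdir = tempfile.mkdtemp()
plain, cipher, restored = (os.path.join(workdir, n) for n in ('p', 'c', 'r'))
with open(plain, 'wb') as f:
    f.write(b'some secret payload')

with open(plain, 'rb') as src, open(cipher, 'wb') as dst:
    with primitive.new_encrypting_stream(dst, b'aad') as enc:
        enc.write(src.read())

with open(cipher, 'rb') as src, open(restored, 'wb') as dst:
    with primitive.new_decrypting_stream(src, b'aad') as dec:
        dst.write(dec.read())

with open(restored, 'rb') as f:
    assert f.read() == b'some secret payload'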
brandonrobertz/SenateVotingRecord2CSV | senatevotingrecord2csv.py | 1 | 5837 | #!/usr/bin/python
""" This tool loads Senate Data directly from the government's website, parses
thru the XML files, and converts into a usable CSV file.
by Brandon Roberts 2012 copyleft GPLv3+."""
import requests
import argparse
import xml.etree.ElementTree as ET
import csv
import re
def fetch( url):
headers = {"User-Agent": "Mozilla/5.0 (Window NT 6.1; WOW64; rv:17.0)"
"Gecko/17.0 Firefox/17.0"}
loaded = False
while not loaded:
try:
r = requests.get(url, headers=headers, allow_redirects=False)
if r.status_code == 200:
return r.text
except Exception as e:
print "[!] Error fetching %s\n[!] %s" % (url, e)
def fetch_bills( SENATE, SESSION ):
""" This first fetches the number of bills in this senate session, then it
iterates through them and collects the raw xml for each bill's vote.
Parameters:
SENATE : The number of senate to search, i.e., 111
SESSION : Which senate session? i.e, 1 or 2
Returns:
A list of every XML file containing information about every
bill voted on for the senate session.
"""
bills = []
# Get number of bills from internet
URLM=( "http://www.senate.gov/legislative/LIS/roll_call_lists/"
"vote_menu_%s_%s.xml"%(SENATE,SESSION))
xmlf0 = fetch( URLM)
tree = ET.fromstring(xmlf0)
# this is the number of bills
TOTAL_BILLS = int(tree[3][0][0].text)
print "[*] Total Bills in Senate %s Session %s: %s" % (SENATE, SESSION,
TOTAL_BILLS)
# Get all senate voting files ... this could be multiprocessed, but it's
# not really worth the effort to me right now
bills = []
for b in xrange( 1, TOTAL_BILLS+1):
b = str(b).zfill(5)
print( "[ ] Fetching record SENATE: %s SESSION: "
"%s NUM: %s"%(SENATE, SESSION, b))
URL=("http://www.senate.gov/legislative/LIS/roll_call_votes/vote%s%s/"
"vote_%s_%s_%s.xml"%(SENATE, SESSION, SENATE, SESSION, b))
bill = fetch( URL)
bills.append( bill)
return bills
def process_bills( FIRST, LAST, bills):
""" This returns a particular senator's voting record from raw XML text
with information about senate bills and their voters.
Parameters:
FIRST : Senator's first name
LAST : Senator's last name
bills : a list of raw XML strings containing the senate voting records
Returns:
A iterable of a particular senators' voting record for each bill.
"""
print "[*] TOTAL BILLS TO PROCESS %s" % len( bills)
n = 0
for bill in bills:
print "[*] PROCESSING NUM: %s"%n
n +=1
tree = ET.fromstring( bill)
# Get votes from this record
text = tree[7].text
if text:
text = re.sub('[^A-Za-z0-9 ]', '', text)
# this next section loops through all the voters (senators) and looks
# for a vote from the senator we want
last = ""
first= ""
vote = ""
for member in tree[17]:
l = member[1].text # last
f = member[2].text # first
v = member[5].text # vote
if l.lower() == LAST.lower() and f.lower() == FIRST.lower():
last = l
first = f
vote = v
break
if vote == "Yea" or vote == "Nay":
yield text, vote
def voting_record( FIRST, LAST, SENATE, SESSION):
""" This is a wrapper for the process_bills and fetch_bills functions. Give
it a Senator's first and last names, the senate number and session and
it will tell you how the senator voted on particular bills.
Paramaters:
FIRST : Senator's first name
LAST : Senator's last name
SENATE : The number of senate to search, i.e., 111
SESSION : Which senate session? i.e, 1 or 2
Returns:
A an iterable list of how a senator voted on each bill.
"""
bills = fetch_bills( SENATE, SESSION )
print "[*] Processing bills XML"
return process_bills( FIRST, LAST, bills)
def argz():
parser = argparse.ArgumentParser()
desc =("This tool loads Senate Data directly from the government's website,"
" parses thru the XML files, and converts into a usable CSV file. "
"It's classified by Yea or Nay vote and "
"looks at the description of the bill as the string. "
"by Brandon Roberts 2012 copyleft GPL3+.")
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("first", type=str, help="first name of politician")
parser.add_argument("last", type=str, help="last name of politician")
parser.add_argument("senate", type=str, help="senate ... 111th would be 111")
parser.add_argument("session", type=str, help="1 or 2")
parser.add_argument("ofile", type=str, help="file to write csv to")
return parser.parse_args()
def write_csv( recs, OFILE):
""" Write our senate voting record to disk.
Parameters:
recs : our iterable list containing a senate voting record
OFILE : the filename to write the CSV to
"""
if ".csv" not in OFILE:
filename = "%s.csv"%OFILE
else:
filename = OFILE
print "[*] Writing to %s"%filename
header = [ "BILL_SUMMARY", "VOTE"]
with open(filename, 'wb') as f:
        w = csv.writer(f)
w.writerow( header)
w.writerows( recs)
def main():
# do cmd line arguments
args = argz()
# our input varz
SENATE = args.senate # 111
SESSION = args.session # 1
LAST = args.last # "Franken"
FIRST = args.first # "Al"
OFILE = args.ofile # "franken.2009.arff"
# fetch voting record
print "[*] Getting %s %s's voting record"%(FIRST, LAST)
recs = voting_record( FIRST, LAST, SENATE, SESSION)
# write voting record
write_csv( recs, OFILE)
# who woulda thought it was so easy?
print "[*] Boom Shucka lucka."
if __name__ == "__main__":
main() | gpl-3.0 | -4,388,949,927,277,576,000 | 32.745665 | 79 | 0.624293 | false | 3.303339 | false | false | false |
yijingping/loganalyzer | loganalyzer/settings/base.py | 1 | 3893 | """Base settings shared by all environments"""
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import * # pylint: disable=W0614,W0401
#==============================================================================
# Generic Django project settings
#==============================================================================
DEBUG = True
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '=q9gc4e4c=w^-4fnt3#vsnsm=n*k_ttcr@8_6wtma(h*w4-e$_'
INSTALLED_APPS = (
'loganalyzer.apps.nginxlog',
'south',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
)
#==============================================================================
# Calculation of directories relative to the project module location
#==============================================================================
import os
import sys
import loganalyzer as project_module
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
PYTHON_BIN = os.path.dirname(sys.executable)
ve_path = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
if os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
# We're running with a virtualenv python executable.
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif ve_path and os.path.exists(os.path.join(ve_path, 'bin',
'activate_this.py')):
# We're running in [virtualenv_root]/src/[project_name].
VAR_ROOT = os.path.join(ve_path, 'var')
else:
# Set the variable root to a path in the project which is
# ignored by the repository.
VAR_ROOT = os.path.join(PROJECT_DIR, 'var')
if not os.path.exists(VAR_ROOT):
os.mkdir(VAR_ROOT)
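# Note: this module only holds defaults shared by every environment. A typical
# deployment-specific module star-imports it and overrides what it needs. The
# module name and values below are purely illustrative, not part of this
# project:
#
#   # loganalyzer/settings/production.py
#   from loganalyzer.settings.base import *  # noqa
#   DEBUG = False
#   ALLOWED_HOSTS = ['example.com']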
#==============================================================================
# Project URLS and media settings
#==============================================================================
ROOT_URLCONF = 'loganalyzer.urls'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
#==============================================================================
# Templates
#==============================================================================
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
)
#==============================================================================
# Middleware
#==============================================================================
MIDDLEWARE_CLASSES += (
)
#==============================================================================
# Auth / security
#==============================================================================
AUTHENTICATION_BACKENDS += (
)
#==============================================================================
# Miscellaneous project settings
#==============================================================================
#==============================================================================
# Third party app settings
#==============================================================================
| bsd-3-clause | 6,763,360,328,404,182,000 | 30.909836 | 79 | 0.469304 | false | 4.474713 | false | false | false |
robcarver17/pysystemtrade | sysdata/production/capital.py | 1 | 19453 | from copy import copy
import datetime
import pandas as pd
from syscore.objects import arg_not_supplied, failure, missing_data
from sysdata.production.timed_storage import (
listOfEntriesData,
)
from sysobjects.production.capital import capitalEntry, LIST_OF_COMPOUND_METHODS, totalCapitalUpdater
## All capital is stored by strategy, but some 'strategies' actually relate to the total global account
from sysobjects.production.timed_storage import listOfEntries
GLOBAL_STRATEGY = "_GLOBAL"
BROKER_ACCOUNT_VALUE = "_BROKER"
MAXIMUM_ACCOUNT_VALUE = "_MAX"
ACC_PROFIT_VALUES = "_PROFIT"
SPECIAL_NAMES = [
GLOBAL_STRATEGY,
BROKER_ACCOUNT_VALUE,
MAXIMUM_ACCOUNT_VALUE,
ACC_PROFIT_VALUES,
]
class capitalForStrategy(listOfEntries):
"""
A list of capitalEntry
"""
def _entry_class(self):
return capitalEntry
class capitalData(listOfEntriesData):
"""
Store and retrieve the capital assigned to a particular strategy
A separate process is required to map from account value to strategy capital
We also store the total account value (GLOBAL STRATEGY), broker account value (BROKER_ACCOUNT_VALUE),
and for half compounding purposes MAXIMUM_ACCOUNT_VALUE
"""
def _data_class_name(self) ->str:
return "sysdata.production.capital.capitalForStrategy"
def get_total_capital_pd_df(self) -> pd.DataFrame:
return self.get_capital_pd_df_for_strategy(GLOBAL_STRATEGY)
def get_broker_account_value_pd_df(self) -> pd.DataFrame:
return self.get_capital_pd_df_for_strategy(BROKER_ACCOUNT_VALUE)
def get_maximum_account_value_pd_df(self) -> pd.DataFrame:
return self.get_capital_pd_df_for_strategy(MAXIMUM_ACCOUNT_VALUE)
def get_profit_and_loss_account_pd_df(self) -> pd.DataFrame:
return self.get_capital_pd_df_for_strategy(ACC_PROFIT_VALUES)
def get_capital_pd_df_for_strategy(self, strategy_name: str) -> pd.DataFrame:
capital_series = self.get_capital_series_for_strategy(strategy_name)
pd_series = capital_series.as_pd_df()
return pd_series
def get_capital_series_for_strategy(self, strategy_name: str) -> capitalForStrategy:
capital_series = self._get_series_for_args_dict(
dict(strategy_name=strategy_name)
)
return capital_series
def get_current_total_capital(self) -> float:
return self.get_current_capital_for_strategy(GLOBAL_STRATEGY)
def get_broker_account_value(self) -> float:
return self.get_current_capital_for_strategy(BROKER_ACCOUNT_VALUE)
def get_current_maximum_account_value(self) -> float:
return self.get_current_capital_for_strategy(MAXIMUM_ACCOUNT_VALUE)
def get_current_pandl_account(self) -> float:
return self.get_current_capital_for_strategy(ACC_PROFIT_VALUES)
def get_current_capital_for_strategy(self, strategy_name: str) -> float:
current_capital_entry = self.get_last_entry_for_strategy(strategy_name)
if current_capital_entry is missing_data:
return missing_data
capital_value = current_capital_entry.capital_value
return capital_value
def get_date_of_last_entry_for_strategy(self, strategy_name: str) -> datetime.datetime:
current_capital_entry = self.get_last_entry_for_strategy(strategy_name)
if current_capital_entry is missing_data:
return missing_data
entry_date = current_capital_entry.date
return entry_date
def get_last_entry_for_strategy(self, strategy_name: str) -> capitalEntry:
current_capital_entry = self._get_current_entry_for_args_dict(
dict(strategy_name=strategy_name)
)
return current_capital_entry
def update_broker_account_value(
self,
new_capital_value: float,
date: datetime.datetime=arg_not_supplied):
self.update_capital_value_for_strategy(
BROKER_ACCOUNT_VALUE, new_capital_value, date=date
)
def update_profit_and_loss_account(
self, new_capital_value: float, date: datetime.datetime=arg_not_supplied):
self.update_capital_value_for_strategy(
ACC_PROFIT_VALUES, new_capital_value, date=date
)
def update_total_capital(self, new_capital_value: float, date: datetime.datetime=arg_not_supplied):
self.update_capital_value_for_strategy(
GLOBAL_STRATEGY, new_capital_value, date=date
)
def update_maximum_capital(self, new_capital_value: float, date: datetime.datetime=arg_not_supplied):
return self.update_capital_value_for_strategy(
MAXIMUM_ACCOUNT_VALUE, new_capital_value, date=date
)
def update_capital_value_for_strategy(
self, strategy_name: str, new_capital_value: float, date: datetime.datetime=arg_not_supplied
):
new_capital_entry = capitalEntry(new_capital_value, date=date)
try:
self._update_entry_for_args_dict(
new_capital_entry, dict(strategy_name=strategy_name)
)
except Exception as e:
self.log.warn(
"Error %s when updating capital for %s with %s"
% (str(e), strategy_name, str(new_capital_entry))
)
def get_list_of_strategies_with_capital(self) -> list:
strategy_names = self._get_list_of_strategies_with_capital_including_reserved_names()
for strat_name in SPECIAL_NAMES:
try:
strategy_names.remove(strat_name)
except IndexError:
# Don't have to have capital defined
pass
return strategy_names
def _get_list_of_strategies_with_capital_including_reserved_names(self) -> list:
list_of_args_dict = self._get_list_of_args_dict()
strategy_names = [d["strategy_name"] for d in list_of_args_dict]
strategy_names = list(set(strategy_names))
return strategy_names
def delete_last_capital_for_strategy(
self, strategy_name: str, are_you_sure=False):
self._delete_last_entry_for_args_dict(
dict(strategy_name=strategy_name), are_you_sure=are_you_sure
)
def delete_all_capital_for_strategy(
self, strategy_name: str, are_you_really_sure=False):
self._delete_all_data_for_args_dict(
dict(
strategy_name=strategy_name),
are_you_really_sure=are_you_really_sure)
def delete_all_special_capital_entries(self, are_you_really_sure=False):
for strat_name in SPECIAL_NAMES:
self.delete_all_capital_for_strategy(
strat_name, are_you_really_sure=are_you_really_sure
)
def delete_recent_capital_for_total_strategy(
self, start_date: datetime.datetime, are_you_sure=False):
self.delete_recent_capital_for_strategy(
GLOBAL_STRATEGY, start_date, are_you_sure=are_you_sure
)
def delete_recent_capital_for_maximum(
self, start_date: datetime.datetime, are_you_sure=False):
self.delete_recent_capital_for_strategy(
MAXIMUM_ACCOUNT_VALUE, start_date, are_you_sure=are_you_sure
)
def delete_recent_capital_for_broker_value(
self, start_date: datetime.datetime, are_you_sure=False):
self.delete_recent_capital_for_strategy(
BROKER_ACCOUNT_VALUE, start_date, are_you_sure=are_you_sure
)
def delete_recent_capital_for_pandl(self, start_date: datetime.datetime, are_you_sure=False):
self.delete_recent_capital_for_strategy(
ACC_PROFIT_VALUES, start_date, are_you_sure=are_you_sure
)
def delete_recent_capital_for_strategy(
self, strategy_name: str, start_date: datetime.datetime, are_you_sure=False
):
have_capital_to_delete = True
while have_capital_to_delete:
last_date_in_data = self.get_date_of_last_entry_for_strategy(strategy_name)
if last_date_in_data is missing_data:
## gone to the start, nothing left
break
if last_date_in_data < start_date:
# before the start date, so don't want to delete
break
else:
self.delete_last_capital_for_strategy(
strategy_name, are_you_sure=are_you_sure
)
class totalCapitalCalculationData(object):
"""
This object allows us to calculate available total capital from previous capital and profits
It uses the special strategy names GLOBAL_STRATEGY and BROKER_ACCOUNT_VALUE, MAXIMUM and PROFIT
Three different compounding methods are available ['full', 'half', 'fixed']
"""
def __init__(self, capital_data: capitalData, calc_method="full"):
"""
        Calculation methods are: full - all profits and losses go to capital; half - profits past the HWM are not added;
        fixed - capital is unaffected by profits and losses (not recommended!)
:param capital_data: capitalData instance or something that inherits from it
:param calc_method: method for going from profits to capital allocated
"""
self._capital_data = capital_data
try:
assert calc_method in LIST_OF_COMPOUND_METHODS
except BaseException:
raise Exception(
"Capital calculation %s has to be one of %s"
% (calc_method, LIST_OF_COMPOUND_METHODS)
)
self._calc_method = calc_method
@property
def capital_data(self):
return self._capital_data
@property
def calc_method(self):
return self._calc_method
def __repr__(self):
return "capitalCalculationData for %s" % self._capital_data
def get_current_total_capital(self):
return self.capital_data.get_current_total_capital()
def get_total_capital(self) ->pd.DataFrame:
return self.capital_data.get_total_capital_pd_df()
    def get_profit_and_loss_account(self) -> pd.DataFrame:
return self.capital_data.get_profit_and_loss_account_pd_df()
def get_broker_account(self) -> pd.DataFrame:
return self.capital_data.get_broker_account_value_pd_df()
def get_maximum_account(self) -> pd.DataFrame:
return self.capital_data.get_maximum_account_value_pd_df()
def get_all_capital_calcs(self) -> pd.DataFrame:
total_capital = self.get_total_capital()
max_capital = self.get_maximum_account()
acc_pandl = self.get_profit_and_loss_account()
broker_acc = self.get_broker_account()
if (
total_capital is missing_data
or max_capital is missing_data
or acc_pandl is missing_data
or broker_acc is missing_data
):
return missing_data
all_capital = pd.concat(
[total_capital, max_capital, acc_pandl, broker_acc], axis=1
)
all_capital.columns = ["Actual", "Max", "Accumulated", "Broker"]
return all_capital
def update_and_return_total_capital_with_new_broker_account_value(
self, broker_account_value: float, check_limit=0.1
) -> float:
"""
does everything you'd expect when a new broker account value arrives:
- add on to broker account value series
- get p&l since last broker
- call capital calculation function, which will update
If the change in broker account value is greater than check_limit then do not update capital
You will have to check and do a manual update if it's correct
        :param broker_account_value: float
:param check_limit: float
:return: current total capital
"""
# Compare broker account value to previous
capital_updater = self._init_capital_updater(broker_account_value)
capital_updater.check_pandl_size(check_limit=check_limit)
capital_updater.calculate_new_total_and_max_capital_given_pandl()
self._update_capital_data_after_pandl_event(capital_updater)
return capital_updater.new_total_capital
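    def _example_update_cycle(self, new_broker_account_value: float = 500000.0):
        """
        Illustrative sketch only, not used anywhere: the account value is a
        made-up number and self._capital_data must be a concrete capitalData
        backend. Shows the intended call pattern when a fresh broker valuation
        arrives.
        """
        # Books the p&l implied by the change in broker value, using the
        # compounding method chosen at construction time ('full', 'half' or 'fixed').
        new_total_capital = self.update_and_return_total_capital_with_new_broker_account_value(
            new_broker_account_value
        )
        return new_total_capital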
def _init_capital_updater(self, new_broker_account_value: float) -> totalCapitalUpdater:
calc_method = self.calc_method
prev_broker_account_value = self._get_prev_broker_account_value_create_if_no_data(new_broker_account_value)
prev_maximum_capital = self.capital_data.get_current_maximum_account_value()
prev_total_capital = self.capital_data.get_current_total_capital()
capital_updater = totalCapitalUpdater(new_broker_account_value= new_broker_account_value,
prev_total_capital = prev_total_capital,
prev_maximum_capital = prev_maximum_capital,
prev_broker_account_value = prev_broker_account_value,
calc_method = calc_method)
return capital_updater
def _get_prev_broker_account_value_create_if_no_data(self, new_broker_account_value: float) -> float:
prev_broker_account_value = self.capital_data.get_broker_account_value()
if prev_broker_account_value is missing_data:
# No previous capital, need to set everything up
self.create_initial_capital(
new_broker_account_value, are_you_really_sure=True)
prev_broker_account_value = copy(new_broker_account_value)
return prev_broker_account_value
def _update_capital_data_after_pandl_event(self, capital_updater: totalCapitalUpdater):
# Update broker account value and add p&l entry with synched dates
date = datetime.datetime.now()
new_total_capital = capital_updater.new_total_capital
new_maximum_capital = capital_updater.new_maximum_capital
new_broker_account_value = capital_updater.new_broker_account_value
profit_and_loss = capital_updater.profit_and_loss
self.capital_data.update_total_capital(
new_total_capital, date=date)
self.capital_data.update_maximum_capital(
new_maximum_capital, date=date)
self.capital_data.update_broker_account_value(
new_broker_account_value, date=date)
self._update_pandl(profit_and_loss, date)
def _update_pandl(self, profit_and_loss: float, date: datetime.datetime):
# Add P&L to accumulated p&l
prev_acc_pandl = self._capital_data.get_current_pandl_account()
new_acc_pandl = prev_acc_pandl + profit_and_loss
self._capital_data.update_profit_and_loss_account(
new_acc_pandl, date=date)
def adjust_broker_account_for_delta(self, delta_value: float):
"""
If you have changed broker account value, for example because of a withdrawal, but don't want this to
affect capital calculations
A negative delta_value indicates a withdrawal (capital value falling) and vice versa
        :param delta_value: change in account value to be ignored; a negative figure is a withdrawal, a positive one is a deposit
:return: None
"""
prev_broker_account_value = self.capital_data.get_broker_account_value()
        if prev_broker_account_value is missing_data:
            self._capital_data.log.warn(
                "Can't apply a delta to broker account value, since no value in data"
            )
            return

        broker_account_value = prev_broker_account_value + delta_value
# Update broker account value
self.capital_data.update_broker_account_value(broker_account_value)
def modify_account_values(
self,
broker_account_value: float=arg_not_supplied,
total_capital: float=arg_not_supplied,
maximum_capital: float=arg_not_supplied,
acc_pandl: float=arg_not_supplied,
date: datetime.datetime=arg_not_supplied,
are_you_sure:bool =False,
):
"""
Allow any account valuation to be modified
Be careful! Only use if you really know what you are doing
        Pass any of broker_account_value, total_capital, maximum_capital or acc_pandl to set new values.
:return: None
"""
if not are_you_sure:
self._capital_data.log.warn(
"You need to be sure to modify capital!")
if date is arg_not_supplied:
date = datetime.datetime.now()
if broker_account_value is not arg_not_supplied:
self.capital_data.update_broker_account_value(
broker_account_value, date=date
)
if total_capital is not arg_not_supplied:
self.capital_data.update_total_capital(total_capital, date=date)
if maximum_capital is not arg_not_supplied:
self.capital_data.update_maximum_capital(
maximum_capital, date=date)
if acc_pandl is not arg_not_supplied:
self.capital_data.update_profit_and_loss_account(
acc_pandl, date=date)
def create_initial_capital(
self,
broker_account_value: float,
total_capital: float=arg_not_supplied,
maximum_capital: float=arg_not_supplied,
acc_pandl: float=arg_not_supplied,
are_you_really_sure: bool=False,
):
"""
Used to create the initial capital series
Will delete capital! So be careful
        If total_capital is not passed it defaults to broker_account_value, and maximum_capital defaults to total_capital
acc_pandl defaults to zero if not passed
Default is to start at HWM with broker account value, but you can modify this
:return: None
"""
self.delete_all_capital(are_you_really_sure=are_you_really_sure)
if total_capital is arg_not_supplied:
total_capital = broker_account_value
if maximum_capital is arg_not_supplied:
maximum_capital = total_capital
if acc_pandl is arg_not_supplied:
acc_pandl = 0
date = datetime.datetime.now()
self.capital_data.update_total_capital(total_capital, date=date)
self.capital_data.update_maximum_capital(maximum_capital, date=date)
self.capital_data.update_broker_account_value(
broker_account_value, date=date)
self.capital_data.update_profit_and_loss_account(acc_pandl, date=date)
def delete_recent_capital(self, start_date: datetime.datetime, are_you_sure: bool=False):
"""
Delete all capital entries on or after start date
:param start_date: pd.datetime
:return:
"""
if not are_you_sure:
self._capital_data.log.warn(
"You have to be sure to delete capital")
return failure
self.capital_data.delete_recent_capital_for_total_strategy(
start_date, are_you_sure=are_you_sure
)
self.capital_data.delete_recent_capital_for_maximum(
start_date, are_you_sure=are_you_sure
)
self.capital_data.delete_recent_capital_for_broker_value(
start_date, are_you_sure=are_you_sure
)
self.capital_data.delete_recent_capital_for_pandl(
start_date, are_you_sure=are_you_sure
)
def delete_all_capital(self, are_you_really_sure: bool=False):
self._capital_data.delete_all_special_capital_entries(
are_you_really_sure=are_you_really_sure
)
| gpl-3.0 | 6,900,970,793,772,945,000 | 36.053333 | 119 | 0.64242 | false | 3.817308 | false | false | false |
martinghunt/ariba | ariba/megares_data_finder.py | 2 | 2078 | import urllib.request
from bs4 import BeautifulSoup
from distutils.version import LooseVersion
class Error (Exception): pass
class MegaresDataFinder:
def __init__(self, version=None):
self.url_root = 'https://megares.meglab.org/download/'
self.index_url = self.url_root + 'index.php'
self.version = version
def _get_available_zips(self):
try:
response = urllib.request.urlopen(self.index_url)
html_text = response.read()
except:
raise Error('Error getting megares download page ' + self.index_url)
return html_text
@classmethod
def _zips_from_index_page_string(cls, html_text):
try:
soup = BeautifulSoup(html_text, 'html.parser')
except:
raise Error('Error parsing contents of megares download page. Cannot continue')
prefix = 'megares_v'
suffix = '.zip'
zips = {}
for link in soup.find_all('a'):
href = link.get('href')
if href.startswith(prefix) and href.endswith(suffix):
version = href[len(prefix):-len(suffix)]
zips[version] = href
return zips
@classmethod
def _get_url_for_version(cls, zips, version=None):
if version is None:
versions = list(zips.keys())
versions.sort(key=LooseVersion)
return zips[versions[-1]]
else:
try:
return zips[version]
except:
versions = ', '.join(list(zips.keys()))
raise Error('Error! version ' + version + ' of megares not found. Available versions: ' + versions)
def run(self):
print('Finding available megares versions from', self.index_url)
html_text = self._get_available_zips()
zips = MegaresDataFinder._zips_from_index_page_string(html_text)
print('Found versions: ', ', '.join(list(zips.keys())))
url = MegaresDataFinder._get_url_for_version(zips, version=self.version)
return self.url_root + url
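def example_get_latest_url():
    """Illustrative sketch only: resolves the download URL of the most recent
    megares release. Needs network access, so it is not called by default;
    pass version='x.y' to MegaresDataFinder to pin a specific release."""
    finder = MegaresDataFinder()
    return finder.run()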
| gpl-3.0 | -762,424,034,836,680,800 | 29.558824 | 115 | 0.588547 | false | 3.928166 | false | false | false |
jdowns/Homer | dao.py | 2 | 2120 | from cuisine import res
class GradeDAO(object):
groupby = {"$group":
{"_id":
{"camis": "$CAMIS",
"dba": "$DBA",
"phone": "$PHONE",
"grade": "$CURRENTGRADE"},
"RECORDDATE": {"$max":"$RECORDDATE"}
}
}
borodict = {"manhattan": 1,
"brooklyn": 3,
"queens": 4,
"statenisland": 5,
"thebronx": 2}
def __init__(self, mongo):
self.ratings = mongo.db.ratings
def __get_grades__(self, match, groupby=groupby):
resultset = self.ratings.aggregate([match, groupby])
return resultset['result']
def get_grade_by_zipcode(self, key):
match = {"$match": {"ZIPCODE": key}}
return self.__get_grades__(match)
def get_grade_by_boro(self, key):
match = {"$match": {"BORO": boro[key]}}
return self.__get_grades__(match)
def get_grade_by_cuisine(self, key):
match = {"%match": {"CUISINE": key}}
return self.__get_grades__(match)
def get_grade_by_phone(self, key):
match = {"$match": {"PHONE": key}}
groupby = {
"$group":{"_id":
{"insp_id" : "$_id",
"camis": "$CAMIS",
"dba": "$DBA",
"score": "$SCORE",
"cuisine": "$CUISINECODE",
"violation": "$VIOLCODE",
"grade": "$CURRENTGRADE",
"building": "$BUILDING",
"street": "$STREET",
"zipcode": "$ZIPCODE",
"boro": "$BORO",},
"INSPECTIONDATE": {"$max":"$INSPDATE"}}}
return self.__get_grades__(match, groupby)
def get_summary(self, boro, cuisine=False):
query = {'BORO': self.borodict[boro]}
if cuisine:
query['CUISINECODE'] = res[cuisine]
camis = self.ratings.find(query).distinct('CAMIS')
result = self.ratings.find({'CAMIS': {"$in": camis}})
return result
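def example_grades_for_zipcode(mongo, zipcode="10003"):
    """Illustrative sketch only: `mongo` is assumed to be a Flask-PyMongo style
    object exposing mongo.db.ratings, and the zip code is a made-up value."""
    dao = GradeDAO(mongo)
    # Each group pairs a restaurant (CAMIS id, name, phone) with its current
    # grade, keyed on the most recent record date.
    return dao.get_grade_by_zipcode(zipcode)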
| gpl-3.0 | -314,681,725,125,033,200 | 33.193548 | 61 | 0.447642 | false | 3.674177 | false | false | false |
Drake81/allris-scraper | allris/pipelines.py | 1 | 1750 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals
from scrapy.contrib.exporter import JsonItemExporter
from scrapy.contrib.exporter import JsonLinesItemExporter
class AllrisPipeline(object):
def __init__(self):
pass
def process_item(self, item, spider):
for entry in item.values():
if not entry:
raise DropItem("Empty ID in item %s" % item)
item['id'] = item['id'][0]
item['name'] = item['name'][0]
item['reference'] = item['name'].split()[0]
item['originator'] = item['originator'][0].split(',')
item['publishedDate'] = item['publishedDate'][0]
item['paperType'] = item['paperType'][0]
item['mainFile'] = item['mainFile'][0]
return item
class JsonExportPipeline(object):
def __init__(self):
dispatcher.connect(self.spider_opened, signals.spider_opened)
dispatcher.connect(self.spider_closed, signals.spider_closed)
self.files = {}
def spider_opened(self, spider):
file = open('%s_items.json' % spider.name, 'w+b')
self.files[spider] = file
self.exporter = JsonItemExporter(file)
#self.exporter = JsonLinesItemExporter(file)
self.exporter.start_exporting()
def spider_closed(self, spider):
self.exporter.finish_exporting()
file = self.files.pop(spider)
file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
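# Reminder (see the note at the top of this file): neither pipeline runs until
# it is registered in the Scrapy project's settings. A minimal sketch (the
# package path 'allris.pipelines' and the priority numbers are assumptions,
# not verified project settings):
#
# ITEM_PIPELINES = {
#     'allris.pipelines.AllrisPipeline': 300,
#     'allris.pipelines.JsonExportPipeline': 800,
# }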
| gpl-3.0 | 1,782,453,825,479,687,000 | 30.25 | 69 | 0.64 | false | 3.829322 | false | false | false |
pnprog/goreviewpartner | mss/__main__.py | 1 | 2427 | # coding: utf-8
"""
This is part of the MSS Python's module.
Source: https://github.com/BoboTiG/python-mss
"""
from __future__ import print_function
import os.path
import sys
from argparse import ArgumentParser
from . import __version__
from .exception import ScreenShotError
from .factory import mss
from .tools import to_png
def main(args=None):
# type: (Optional[List[str]]) -> int
""" Main logic. """
cli_args = ArgumentParser()
cli_args.add_argument('-c', '--coordinates', default='', type=str,
help='the part of the screen to capture:'
' top, left, width, height')
cli_args.add_argument('-m', '--monitor', default=0, type=int,
help='the monitor to screen shot')
cli_args.add_argument('-o', '--output', default='monitor-{mon}.png',
help='the output file name')
cli_args.add_argument('-q', '--quiet', default=False, action='store_true',
help='do not print created files')
cli_args.add_argument('-v', '--version', action='version',
version=__version__)
options = cli_args.parse_args(args)
kwargs = {
'mon': options.monitor,
'output': options.output,
}
if options.coordinates:
try:
top, left, width, height = options.coordinates.split(',')
except ValueError:
print('Coordinates syntax: top, left, width, height')
return 2
kwargs['mon'] = {
'top': int(top),
'left': int(left),
'width': int(width),
'height': int(height),
}
if options.output == 'monitor-{mon}.png':
kwargs['output'] = 'sct-{top}x{left}_{width}x{height}.png'
try:
with mss() as sct:
if options.coordinates:
output = kwargs['output'].format(**kwargs['mon'])
sct_img = sct.grab(kwargs['mon'])
to_png(sct_img.rgb, sct_img.size, output)
if not options.quiet:
print(os.path.realpath(output))
else:
for file_name in sct.save(**kwargs):
if not options.quiet:
print(os.path.realpath(file_name))
return 0
except ScreenShotError:
return 1
if __name__ == '__main__':
exit(main(sys.argv[1:]))
| gpl-3.0 | 5,378,870,711,625,983,000 | 31.36 | 78 | 0.531108 | false | 4.018212 | false | false | false |
SilverStrange/Null-File-Detector | null/scan.py | 1 | 3933 | # -*- coding: utf-8 -*-
from functools import partial
from multiprocessing import Process
import multiprocessing as mp
import sys
import os
import platform
import unicodedata
# https://github.com/pyinstaller/pyinstaller/wiki/Recipe-Multiprocessing
# Module multiprocessing is organized differently in Python 3.4+
try:
# Python 3.4+
if sys.platform.startswith('win'):
import multiprocessing.popen_spawn_win32 as forking
else:
import multiprocessing.popen_fork as forking
except ImportError:
import multiprocessing.forking as forking
if sys.platform.startswith('win'):
# First define a modified version of Popen.
class _Popen(forking.Popen):
def __init__(self, *args, **kw):
if hasattr(sys, 'frozen'):
# We have to set original _MEIPASS2 value from sys._MEIPASS
# to get --onefile mode working.
os.putenv('_MEIPASS2', sys._MEIPASS)
try:
super(_Popen, self).__init__(*args, **kw)
finally:
if hasattr(sys, 'frozen'):
# On some platforms (e.g. AIX) 'os.unsetenv()' is not
# available. In those cases we cannot delete the variable
# but only set it to the empty string. The bootloader
# can handle this case.
if hasattr(os, 'unsetenv'):
os.unsetenv('_MEIPASS2')
else:
os.putenv('_MEIPASS2', '')
# Second override 'Popen' class with our modified version.
forking.Popen = _Popen
def read_in_chunks(file_object, chunk_size=4 * 1024 * 1024):
"""Lazy function (generator) to read a file piece by piece.
    Default chunk size: 4 MB.
"""
while True:
data = file_object.read(chunk_size)
if not data:
break
yield data
def do_work(in_queue, out_queue, null_char):
"""Pulls data from in_queue, counts number of null characters,
and sends result to out_queue.
"""
while True:
null = 0
item = in_queue.get()
for byte in item:
if byte == null_char:
null = null + 1
out_queue.put(null)
in_queue.task_done()
def scan(name, work_queue, result_queue):
"""Loads data into work_queue, then gets results from result_queue."""
try:
with open(name, 'rb') as f:
for i in read_in_chunks(f):
work_queue.put(i)
except IOError:
return 'Error'
else:
work_queue.join()
null_count = sum([result_queue.get()
for i in range(result_queue.qsize())])
return null_count
def create_workers(work_queue, result_queue, null_char=b'\x00'):
"""Generates daemonized worker processes."""
num_workers = mp.cpu_count() - 1
if num_workers < 1:
num_workers = 1
# Start workers
worker_list = []
for i in range(num_workers):
t = Process(target=do_work, args=(work_queue, result_queue, null_char))
worker_list.append(t)
t.daemon = True
t.start()
return worker_list
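def example_count_nulls(path):
    """Illustrative sketch only: wires the queues, the worker pool and scan()
    together for one file. `path` is any file you want to inspect; the tool's
    real entry point may differ from this."""
    work_queue = mp.JoinableQueue()
    result_queue = mp.Queue()
    # Workers are daemonised, so they vanish with the parent process.
    create_workers(work_queue, result_queue)
    # scan() feeds the file in chunks, waits for the counts and sums them,
    # returning 'Error' if the file could not be read.
    return scan(path, work_queue, result_queue)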
def scan_target(path, files, directories):
"""
Processes given path.
Adds files to files list.
If path is a directory, all subfiles and directories are added to
the files and directories lists as appropriate.
Returns list of files and list of directories.
"""
path = os.path.abspath(path)
if not os.path.isdir(path):
files.append(path)
return files, directories
directory_list = [
unicodedata.normalize('NFC', f) for f in os.listdir(path)]
for entry in directory_list:
entry_path = os.path.join(path, entry)
if os.path.isdir(entry_path):
directories.append(entry_path)
else:
files.append(entry_path)
return files, directories
| mit | 5,311,685,533,779,778,000 | 27.294964 | 79 | 0.588863 | false | 4.013265 | false | false | false |
evernym/zeno | ledger/hash_stores/hash_store.py | 2 | 4941 | from abc import abstractmethod
from ledger.util import count_bits_set
from ledger.util import highest_bit_set
class HashStore:
"""
Store of nodeHashes and leafHashes mapped against their sequence numbers.
"""
@property
@abstractmethod
def is_persistent(self) -> bool:
pass
@abstractmethod
def writeLeaf(self, leafHash):
"""
append the leafHash to the leaf hash store
:param leafHash: hash of the leaf
"""
@abstractmethod
def writeNode(self, node):
"""
append the node to the node hash store.
:param node: tuple of start, height and nodeHash
"""
@abstractmethod
def readLeaf(self, pos):
"""
Read the leaf hash at the given position in the merkle tree
:param pos: the sequence number of the leaf
:return: the leafHash at the specified position
"""
@abstractmethod
def readNode(self, pos):
"""
Read the node hash at the given position in the merkle tree
:param pos: the sequence number of the node (as calculated by
getNodePosition)
:return: the nodeHash at the specified position
"""
@abstractmethod
def readLeafs(self, startpos, endpos):
"""
        Read multiple leaf hashes in the given position range.
:param startpos: read from this sequence number (inclusive)
:param endpos: read up to this sequence number (inclusive)
:return: list of leafHashes
"""
@abstractmethod
def readNodes(self, startpos, endpos):
"""
        Read multiple node hashes in the given position range. Node positions
        can be calculated using getNodePosition.
:param startpos: read from this sequence number (inclusive)
:param endpos: read up to this sequence number (inclusive)
:return: list of nodeHashes
"""
@property
@abstractmethod
def leafCount(self) -> int:
pass
@leafCount.setter
@abstractmethod
def leafCount(self, count: int) -> None:
pass
@property
@abstractmethod
def nodeCount(self) -> None:
pass
@classmethod
def getNodePosition(cls, start, height=None) -> int:
"""
Calculates node position based on start and height
:param start: The sequence number of the first leaf under this tree.
:param height: Height of this node in the merkle tree
:return: the node's position
"""
pwr = highest_bit_set(start) - 1
height = height or pwr
if count_bits_set(start) == 1:
adj = height - pwr
return start - 1 + adj
else:
c = pow(2, pwr)
return cls.getNodePosition(c, pwr) + \
cls.getNodePosition(start - c, height)
@classmethod
def getPath(cls, seqNo, offset=0):
"""
        Get the audit path of the leaf at the position specified by seqNo.
:param seqNo: sequence number of the leaf to calculate the path for
:param offset: the sequence number of the node from where the path
should begin.
:return: tuple of leafs and nodes
"""
if offset >= seqNo:
raise ValueError("Offset should be less than serial number")
pwr = highest_bit_set(seqNo - 1 - offset) - 1
if pwr <= 0:
if seqNo % 2 == 0:
return [seqNo - 1], []
else:
return [], []
c = pow(2, pwr) + offset
leafs, nodes = cls.getPath(seqNo, c)
nodes.append(cls.getNodePosition(c, pwr))
return leafs, nodes
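    @classmethod
    def _example_positions(cls):
        """
        Illustrative sketch only (not part of the storage API): shows how the
        two classmethods above are meant to be combined. The leaf numbers are
        arbitrary.
        """
        # Store position of the node whose subtree starts at leaf 4 (default height).
        node_pos = cls.getNodePosition(4)
        # Leaf and node store positions making up the audit path for leaf 6.
        leaf_positions, node_positions = cls.getPath(6)
        return node_pos, leaf_positions, node_positions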
def readNodeByTree(self, start, height=None):
"""
Fetches nodeHash based on start leaf and height of the node in the tree.
:return: the nodeHash
"""
pos = self.getNodePosition(start, height)
return self.readNode(pos)
@property
def is_consistent(self) -> bool:
"""
Returns True if number of nodes are consistent with number of leaves
"""
from ledger.compact_merkle_tree import CompactMerkleTree
return self.nodeCount == CompactMerkleTree.get_expected_node_count(
self.leafCount)
@staticmethod
def _validatePos(start, end=None):
if end and start >= end:
raise IndexError(
"start ({}) index must be less than end ({}) index"
.format(start, end)
)
if start < 1:
raise IndexError(
"seqNo starts from 1, index requested: {}".format(start))
@abstractmethod
def open(self):
pass
@abstractmethod
def close(self):
pass
@property
@abstractmethod
def closed(self):
pass
@abstractmethod
def reset(self) -> bool:
"""
Removes all data from hash store
:return: True if completed successfully
"""
| apache-2.0 | 9,138,596,346,540,573,000 | 26.915254 | 80 | 0.587128 | false | 4.427419 | false | false | false |
sylvchev/mdla | mdla/dict_metrics.py | 1 | 18435 | # -*- coding: utf-8 -*-
"""
The `dict_metrics` module implements utilities to compare
frames and dictionaries.
This module implements several criteria and metrics to compare different sets
of atoms. This module is primarily focused on multivariate kernels and
atoms.
"""
# Authors: Sylvain Chevallier <[email protected]>
# License: GPL v3
# TODO: add docstring to criteria fonction
# verify Fubini-Study scale parameter
# verify beta dist behavior, seems like 1-bd
# change scale behavior, replace 1-d with d !
import cvxopt as co
import cvxopt.solvers as solv
import numpy as np
import scipy.linalg as sl
from numpy import (
NaN,
abs,
all,
arccos,
arcsin,
argmax,
array,
atleast_2d,
concatenate,
infty,
max,
min,
ones,
ones_like,
sqrt,
trace,
unravel_index,
zeros,
zeros_like,
)
from numpy.linalg import det, norm, svd
def _kernel_registration(this_kernel, dictionary, g):
k_len = this_kernel.shape[0]
n_kernels = len(dictionary)
k_max_len = array([i.shape[0] for i in dictionary]).max()
m_dist = ones((n_kernels, k_max_len - k_len + 1)) * infty
m_corr = zeros((n_kernels, k_max_len - k_len + 1))
for i, kernel in enumerate(dictionary): # kernel loop
ks = kernel.shape[0]
# for t in range(k_max_len-k_len+1): # convolution loop
for t in range(ks - k_len + 1): # convolution loop
# print ("t = ", t, "and l =", l)
# print ("kernel = ", kernel.shape,
# "and kernel[t:t+l,:] = ", kernel[t:t+k_len,:].shape)
m_dist[i, t] = g(this_kernel, kernel[t : t + k_len, :])
m_corr[i, t] = trace(this_kernel.T.dot(kernel[t : t + k_len, :])) / (
norm(this_kernel, "fro") * norm(kernel[t : t + k_len, :], "fro")
)
return m_dist, m_corr
def principal_angles(A, B):
"""Compute the principal angles between subspaces A and B.
The algorithm for computing the principal angles is described in :
A. V. Knyazev and M. E. Argentati,
Principal Angles between Subspaces in an A-Based Scalar Product:
Algorithms and Perturbation Estimates. SIAM Journal on Scientific Computing,
23 (2002), no. 6, 2009-2041.
http://epubs.siam.org/sam-bin/dbq/article/37733
"""
# eps = np.finfo(np.float64).eps**.981
# for i in range(A.shape[1]):
# normi = la.norm(A[:,i],np.inf)
# if normi > eps: A[:,i] = A[:,i]/normi
# for i in range(B.shape[1]):
# normi = la.norm(B[:,i],np.inf)
# if normi > eps: B[:,i] = B[:,i]/normi
QA = sl.orth(A)
QB = sl.orth(B)
_, s, Zs = svd(QA.T.dot(QB), full_matrices=False)
s = np.minimum(s, ones_like(s))
theta = np.maximum(np.arccos(s), np.zeros_like(s))
V = QB.dot(Zs)
idxSmall = s > np.sqrt(2.0) / 2.0
if np.any(idxSmall):
RB = V[:, idxSmall]
_, x, _ = svd(RB - QA.dot(QA.T.dot(RB)), full_matrices=False)
thetaSmall = np.flipud(
np.maximum(arcsin(np.minimum(x, ones_like(x))), zeros_like(x))
)
theta[idxSmall] = thetaSmall
return theta
def chordal_principal_angles(A, B):
"""
chordal_principal_angles(A, B) Compute the chordal distance based on
principal angles.
Compute the chordal distance based on principal angles between A and B
as :math:`d=\sqrt{ \sum_i \sin^2 \theta_i}`
"""
return sqrt(np.sum(np.sin(principal_angles(A, B)) ** 2))
def chordal(A, B):
"""
chordal(A, B) Compute the chordal distance
Compute the chordal distance between A and B
as d=\sqrt{K - ||\bar{A}^T\bar{B}||_F^2}
where K is the rank of A and B, || . ||_F is the Frobenius norm,
\bar{A} is the orthogonal basis associated with A and the same goes for B.
"""
if A.shape != B.shape:
raise ValueError(
f"Atoms have not the same dimension ({A.shape} and {B.shape}). Error raised"
f"in chordal(A, B)",
)
if np.allclose(A, B):
return 0.0
else:
d2 = A.shape[1] - norm(sl.orth(A).T.dot(sl.orth(B)), "fro") ** 2
if d2 < 0.0:
return sqrt(abs(d2))
else:
return sqrt(d2)
def fubini_study(A, B):
"""
fubini_study(A, B) Compute the Fubini-Study distance
Compute the Fubini-Study distance based on principal angles between A and B
    as d = \arccos( \prod_i \cos \theta_i )
"""
if A.shape != B.shape:
raise ValueError(
f"Atoms have different dim ({A.shape} and {B.shape}). Error raised in"
f"fubini_study(A, B)",
)
if np.allclose(A, B):
return 0.0
return arccos(det(sl.orth(A).T.dot(sl.orth(B))))
def binet_cauchy(A, B):
"""Compute the Binet-Cauchy distance
Compute the Binet-Cauchy distance based on principal angles between A
and B with d=\sqrt{ 1 - \prod_i \cos^2 \theta_i}
"""
theta = principal_angles(A, B)
return sqrt(1.0 - np.prod(np.cos(theta) ** 2))
def geodesic(A, B):
"""
geodesic (A, B) Compute the arc length or geodesic distance
Compute the arc length or geodesic distance based on principal angles between A
and B with d=\sqrt{ \sum_i \theta_i^2}
"""
theta = principal_angles(A, B)
return norm(theta)
def frobenius(A, B):
if A.shape != B.shape:
raise ValueError(
f"Atoms have different dim ({A.shape} and {B.shape}). Error raised in"
f"frobenius(A, B)",
)
return norm(A - B, "fro")
def abs_euclidean(A, B):
if (A.ndim != 1 and A.shape[1] != 1) or (B.ndim != 1 and B.shape[1] != 1):
raise ValueError(
f"Atoms are not univariate ({A.shape} and {B.shape}). Error raised"
f"in abs_euclidean(A, B)",
)
if np.allclose(A, B):
return 0.0
else:
return sqrt(2.0 * (1.0 - np.abs(A.T.dot(B))))
def euclidean(A, B):
if (A.ndim != 1 and A.shape[1] != 1) or (B.ndim != 1 and B.shape[1] != 1):
raise ValueError(
f"Atoms are not univariate ({A.shape} and {B.shape}). Error raised in"
f"euclidean(A, B)",
)
if np.allclose(A, B):
return 0.0
else:
return sqrt(2.0 * (1.0 - A.T.dot(B)))
def _valid_atom_metric(gdist):
"""Verify that atom metric exist and return the correct function"""
if gdist == "chordal":
return chordal
elif gdist == "chordal_principal_angles":
return chordal_principal_angles
elif gdist == "fubinistudy":
return fubini_study
elif gdist == "binetcauchy":
return binet_cauchy
elif gdist == "geodesic":
return geodesic
elif gdist == "frobenius":
return frobenius
elif gdist == "abs_euclidean":
return abs_euclidean
elif gdist == "euclidean":
return euclidean
else:
return None
def _scale_metric(gdist, d, D1):
if (
gdist == "chordal"
or gdist == "chordal_principal_angles"
or gdist == "fubinistudy"
or gdist == "binetcauchy"
or gdist == "geodesic"
):
# TODO: scale with max n_features
return d / sqrt(D1[0].shape[0])
elif gdist == "frobenius":
return d / sqrt(2.0)
else:
return d
def _compute_gdm(D1, D2, g):
"""Compute ground distance matrix from dictionaries D1 and D2
Distance g acts as ground distance.
A kernel registration is applied if dictionary atoms do not have
the same size.
"""
# Do we need a registration? If kernel do not have the same shape, yes
if not all(array([i.shape[0] for i in D1 + D2]) == D1[0].shape[0]):
# compute correlation and distance matrices
k_dim = D1[0].shape[1]
# minl = np.array([i.shape[1] for i in D1+D2]).min()
max_l1 = array([i.shape[0] for i in D1]).max()
max_l2 = array([i.shape[0] for i in D2]).max()
if max_l2 > max_l1:
Da = D1
Db = D2
max_l = max_l2
else:
Da = D2
Db = D1
max_l = max_l1
# Set all Db atom to largest value
Dbe = []
for i in range(len(Db)):
k_l = Db[i].shape[0]
Dbe.append(concatenate((zeros((max_l - k_l, k_dim)), Db[i]), axis=0))
gdm = zeros((len(Da), len(Db)))
for i in range(len(Da)):
m_dist, m_corr = _kernel_registration(Da[i], Dbe, g)
k_l = Da[i].shape[0]
# m_dist, m_corr = _kernel_registration(np.concatenate((zeros((np.int(np.floor((max_l-k_l)/2.)), k_dim)), Da[i], zeros((np.int(np.ceil((max_l-k_l)/2.)), k_dim))), axis=0), Dbe, g)
for j in range(len(Dbe)):
gdm[i, j] = m_dist[
j, unravel_index(abs(m_corr[j, :]).argmax(), m_corr[j, :].shape)
]
else:
# all atoms have the same length, no registration
gdm = zeros((len(D1), len(D2)))
for i in range(len(D1)):
for j in range(len(D2)):
gdm[i, j] = g(D1[i], D2[j])
return gdm
def hausdorff(D1, D2, gdist, scale=False):
"""
Compute the Hausdorff distance between two sets of elements, here
dictionary atoms, using a ground distance.
    Possible choices are "chordal", "chordal_principal_angles", "fubinistudy",
    "binetcauchy", "geodesic", "frobenius", "abs_euclidean" or "euclidean".
The scale parameter changes the return value to be between 0 and 1.
"""
g = _valid_atom_metric(gdist)
if g is None:
print("Unknown ground distance, exiting.")
return NaN
gdm = _compute_gdm(D1, D2, g)
d = max([max(min(gdm, axis=0)), max(min(gdm, axis=1))])
if not scale:
return d
else:
return _scale_metric(gdist, d, D1)
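def _example_compare_dictionaries():
    """Illustrative sketch only: builds two small random multivariate
    dictionaries (5 atoms of size 16x2 each) and compares them. The sizes are
    arbitrary and the values depend on the random draw."""
    rng = np.random.RandomState(0)
    D1 = [rng.randn(16, 2) for _ in range(5)]
    D2 = [rng.randn(16, 2) for _ in range(5)]
    # Hausdorff distance with the chordal ground metric, rescaled to [0, 1].
    d_h = hausdorff(D1, D2, "chordal", scale=True)
    # emd() uses the same ground metrics but solves a small transport problem.
    d_emd = emd(D1, D2, "chordal", scale=True)
    return d_h, d_emd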
def emd(D1, D2, gdist, scale=False):
"""
Compute the Earth Mover's Distance (EMD) between two sets of elements,
here dictionary atoms, using a ground distance.
    Possible choices are "chordal", "chordal_principal_angles", "fubinistudy",
    "binetcauchy", "geodesic", "frobenius", "abs_euclidean" or "euclidean".
The scale parameter changes the return value to be between 0 and 1.
"""
g = _valid_atom_metric(gdist)
if g is None:
print("Unknown ground distance, exiting.")
return NaN
# if gdist == "chordal":
# g = chordal
# elif gdist == "chordal_principal_angles":
# g = chordal_principal_angles
# elif gdist == "fubinistudy":
# g = fubini_study
# elif gdist == "binetcauchy":
# g = binet_cauchy
# elif gdist == "geodesic":
# g = geodesic
# elif gdist == "frobenius":
# g = frobenius
# elif gdist == "abs_euclidean":
# g = abs_euclidean
# elif gdist == "euclidean":
# g = euclidean
# else:
# print 'Unknown ground distance, exiting.'
# return NaN
# # Do we need a registration? If kernel do not have the same shape, yes
# if not np.all(np.array([i.shape[0] for i in D1+D2]) == D1[0].shape[0]):
# # compute correlation and distance matrices
# k_dim = D1[0].shape[1]
# # minl = np.array([i.shape[1] for i in D1+D2]).min()
# max_l1 = np.array([i.shape[0] for i in D1]).max()
# max_l2 = np.array([i.shape[0] for i in D2]).max()
# if max_l2 > max_l1:
# Da = D1
# Db = D2
# max_l = max_l2
# else:
# Da = D2
# Db = D1
# max_l = max_l1
# Dbe = []
# for i in range(len(Db)):
# k_l = Db[i].shape[0]
# Dbe.append(np.concatenate((zeros((max_l-k_l, k_dim)), Db[i]), axis=0))
# gdm = zeros((len(Da), len(Db)))
# for i in range(len(Da)):
# k_l = Da[i].shape[0]
# m_dist, m_corr = _kernel_registration(np.concatenate((zeros(( np.int(np.floor((max_l-k_l)/2.)), k_dim)), Da[i], zeros((np.int(np.ceil((max_l-k_l)/2.)), k_dim))), axis=0), Dbe, g)
# for j in range(len(Dbe)):
# gdm[i,j] = m_dist[j, np.unravel_index(np.abs(m_corr[j,:]).argmax(), m_corr[j,:].shape)]
# else:
# # all atoms have the same length, no registration
# gdm = np.zeros((len(D1), len(D2)))
# for i in range(len(D1)):
# for j in range(len(D2)):
# gdm[i,j] = g(D1[i], D2[j])
gdm = _compute_gdm(D1, D2, g)
c = co.matrix(gdm.flatten(order="F"))
G1 = co.spmatrix([], [], [], (len(D1), len(D1) * len(D2)))
G2 = co.spmatrix([], [], [], (len(D2), len(D1) * len(D2)))
G3 = co.spmatrix(-1.0, range(len(D1) * len(D2)), range(len(D1) * len(D2)))
for i in range(len(D1)):
for j in range(len(D2)):
k = j + (i * len(D2))
G1[i, k] = 1.0
G2[j, k] = 1.0
G = co.sparse([G1, G2, G3])
h1 = co.matrix(1.0 / len(D1), (len(D1), 1))
h2 = co.matrix(1.0 / len(D2), (len(D2), 1))
h3 = co.spmatrix([], [], [], (len(D1) * len(D2), 1))
h = co.matrix([h1, h2, h3])
A = co.matrix(1.0, (1, len(D1) * len(D2)))
b = co.matrix([1.0])
co.solvers.options["show_progress"] = False
sol = solv.lp(c, G, h, A, b)
d = sol["primal objective"]
if not scale:
return d
else:
return _scale_metric(gdist, d, D1)
# if (gdist == "chordal" or gdist == "chordal_principal_angles" or
# gdist == "fubinistudy" or gdist == "binetcauchy" or
# gdist == "geodesic"):
# return d/sqrt(D1[0].shape[0])
# elif gdist == "frobenius":
# return d/sqrt(2.)
# else:
# return d
def _multivariate_correlation(s, D):
"""Compute correlation between multivariate atoms
    Compute the correlation between a multivariate atom s and dictionary D
    as the sum of the correlations over the n_dims dimensions.
"""
n_features = s.shape[0]
n_dims = s.shape[1]
n_kernels = len(D)
corr = np.zeros((n_kernels, n_features))
for k in range(n_kernels): # for all atoms
corrTmp = 0
for j in range(n_dims): # for all dimensions
corrTmp += np.correlate(s[:, j], D[k][:, j])
corr[k, : len(corrTmp)] = corrTmp
return corr
def detection_rate(ref, recov, threshold):
"""Compute the detection rate between reference and recovered dictionaries
The reference ref and the recovered recov are univariate or multivariate
dictionaries. An atom a of the ref dictionary is considered as recovered if
    $c \geq threshold$ with $c = \max_{r \in R} |<a, r>|$, that is the absolute
value of the maximum correlation between a and any atom r of the recovered
dictionary R is above a given threshold.
The process is iterative and an atom r could be matched only once with an
    atom a of the reference dictionary. In other words, each atom a is matched
with a different atom r.
"""
n_kernels_ref, n_kernels_recov = len(ref), len(recov)
n_features = ref[0].shape[0]
if ref[0].ndim == 1:
n_dims = 1
for k in range(n_kernels_ref):
ref[k] = atleast_2d(ref[k]).T
else:
n_dims = ref[0].shape[1]
if recov[0].ndim == 1:
for k in range(n_kernels_recov):
recov[k] = atleast_2d(recov[k]).T
dr = 0
corr = zeros((n_kernels_ref, n_kernels_recov))
for k in range(n_kernels_ref):
c_tmp = _multivariate_correlation(
concatenate(
(zeros((n_features, n_dims)), ref[k], zeros((n_features, n_dims))), axis=0
),
recov,
)
for j in range(n_kernels_recov):
idx_max = argmax(abs(c_tmp[j, :]))
corr[k, j] = c_tmp[j, idx_max]
c_local = np.abs(corr.copy())
for _ in range(n_kernels_ref):
max_corr = c_local.max()
if max_corr >= threshold:
dr += 1
idx_max = np.unravel_index(c_local.argmax(), c_local.shape)
c_local[:, idx_max[1]] = zeros(n_kernels_ref)
c_local[idx_max[0], :] = zeros(n_kernels_recov)
return float(dr) / n_kernels_recov * 100.0
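def _example_detection_rate():
    """Illustrative sketch only: a dictionary compared against a noisy copy of
    itself. Sizes, noise level and threshold are arbitrary choices."""
    rng = np.random.RandomState(42)
    ref = [rng.randn(16, 2) for _ in range(5)]
    ref = [a / norm(a, "fro") for a in ref]
    # Perturb each atom slightly and renormalise to fake a "recovered" dictionary.
    recov = [a + 0.01 * rng.randn(16, 2) for a in ref]
    recov = [a / norm(a, "fro") for a in recov]
    # Percentage of reference atoms matched with correlation above 0.97.
    return detection_rate(ref, recov, 0.97)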
def _convert_array(ref, recov):
if ref[0].ndim == 1:
for k in range(len(ref)):
ref[k] = atleast_2d(ref[k]).T
if recov[0].ndim == 1:
for k in range(len(recov)):
recov[k] = atleast_2d(recov[k]).T
D1 = np.array(ref)
D2 = np.array(recov)
M = D1.shape[0]
N = D1.shape[1]
D1 = D1.reshape((M, N))
D2 = D2.reshape((M, N))
return D1, D2, M
def precision_recall(ref, recov, threshold):
"""Compute precision and recall for recovery experiment"""
D1, D2, M = _convert_array(ref, recov)
corr = D1.dot(D2.T)
precision = float((np.max(corr, axis=0) > threshold).sum()) / float(M)
recall = float((np.max(corr, axis=1) > threshold).sum()) / float(M)
return precision * 100.0, recall * 100.0
def precision_recall_points(ref, recov):
"""Compute the precision and recall for each atom in a recovery experiment"""
# if ref[0].ndim == 1:
# for k in range(len(ref)):
# ref[k] = atleast_2d(ref[k]).T
# if recov[0].ndim == 1:
# for k in range(len(recov)):
# recov[k] = atleast_2d(recov[k]).T
# D1 = np.array(ref)
# D2 = np.array(recov)
# M = D1.shape[0]
# N = D1.shape[1]
# D1 = D1.reshape((M, N))
# D2 = D2.reshape((M, N))
D1, D2, _ = _convert_array(ref, recov)
corr = D1.dot(D2.T)
precision = np.max(corr, axis=0)
recall = np.max(corr, axis=1)
return precision, recall
def beta_dist(D1, D2):
"""Compute the Beta-distance proposed by Skretting and Engan
The beta-distance is:
$\beta(D1, D2)=1/(M1+M2)(\sum_j \beta(D1, d^2_j)+\sum_j \beta(D2, d^1_j))$
with $\beta(D, x) = arccos(\max_i |d^T_i x|/||x||)$
as proposed in:
Karl Skretting and Kjersti Engan,
Learned dictionaries for sparse image representation: properties and results,
SPIE, 2011.
"""
if D1[0].shape != D2[0].shape:
raise ValueError(
f"Dictionaries have different dim : {D1[0].shape} and {D2[0].shape}."
)
D1 = np.array(D1)
M1 = D1.shape[0]
N = D1.shape[1]
D1 = D1.reshape((M1, N))
D2 = np.array(D2)
M2 = D2.shape[0]
D2 = D2.reshape((M2, N))
corr = D1.dot(D2.T)
if np.allclose(np.max(corr, axis=0), ones(M2)) and np.allclose(
np.max(corr, axis=1), ones(M1)
):
return 0.0
return (
np.sum(np.arccos(np.max(corr, axis=0))) + np.sum(np.arccos(np.max(corr, axis=1)))
) / (M1 + M2)
| gpl-3.0 | -3,847,988,425,288,959,000 | 32.763736 | 192 | 0.559371 | false | 2.951489 | false | false | false |
googleapis/python-bigtable | samples/hello/main.py | 1 | 4786 | #!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites:
- Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster
- Set your Google Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-credentials
"""
import argparse
# [START bigtable_hw_imports]
import datetime
from google.cloud import bigtable
from google.cloud.bigtable import column_family
from google.cloud.bigtable import row_filters
# [END bigtable_hw_imports]
def main(project_id, instance_id, table_id):
# [START bigtable_hw_connect]
# The client must be created with admin=True because it will create a
# table.
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
# [END bigtable_hw_connect]
# [START bigtable_hw_create_table]
print('Creating the {} table.'.format(table_id))
table = instance.table(table_id)
print('Creating column family cf1 with Max Version GC rule...')
# Create a column family with GC policy : most recent N versions
# Define the GC policy to retain only the most recent 2 versions
max_versions_rule = column_family.MaxVersionsGCRule(2)
column_family_id = 'cf1'
column_families = {column_family_id: max_versions_rule}
if not table.exists():
table.create(column_families=column_families)
else:
print("Table {} already exists.".format(table_id))
# [END bigtable_hw_create_table]
# [START bigtable_hw_write_rows]
print('Writing some greetings to the table.')
greetings = ['Hello World!', 'Hello Cloud Bigtable!', 'Hello Python!']
rows = []
column = 'greeting'.encode()
for i, value in enumerate(greetings):
# Note: This example uses sequential numeric IDs for simplicity,
# but this can result in poor performance in a production
# application. Since rows are stored in sorted order by key,
# sequential keys can result in poor distribution of operations
# across nodes.
#
# For more information about how to design a Bigtable schema for
# the best performance, see the documentation:
#
# https://cloud.google.com/bigtable/docs/schema-design
row_key = 'greeting{}'.format(i).encode()
row = table.direct_row(row_key)
row.set_cell(column_family_id,
column,
value,
timestamp=datetime.datetime.utcnow())
rows.append(row)
table.mutate_rows(rows)
# [END bigtable_hw_write_rows]
# [START bigtable_hw_create_filter]
# Create a filter to only retrieve the most recent version of the cell
    # for each column across the entire row.
row_filter = row_filters.CellsColumnLimitFilter(1)
# [END bigtable_hw_create_filter]
# [START bigtable_hw_get_with_filter]
print('Getting a single greeting by row key.')
key = 'greeting0'.encode()
row = table.read_row(key, row_filter)
cell = row.cells[column_family_id][column][0]
print(cell.value.decode('utf-8'))
# [END bigtable_hw_get_with_filter]
# [START bigtable_hw_scan_with_filter]
print('Scanning for all greetings:')
partial_rows = table.read_rows(filter_=row_filter)
for row in partial_rows:
cell = row.cells[column_family_id][column][0]
print(cell.value.decode('utf-8'))
# [END bigtable_hw_scan_with_filter]
# [START bigtable_hw_delete_table]
print('Deleting the {} table.'.format(table_id))
table.delete()
# [END bigtable_hw_delete_table]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('project_id', help='Your Cloud Platform project ID.')
parser.add_argument(
'instance_id', help='ID of the Cloud Bigtable instance to connect to.')
parser.add_argument(
'--table',
help='Table to create and destroy.',
default='Hello-Bigtable')
args = parser.parse_args()
main(args.project_id, args.instance_id, args.table)
| apache-2.0 | 4,320,992,909,016,135,700 | 35.815385 | 82 | 0.680318 | false | 3.822684 | false | false | false |
nakednamor/naked-python | Python Crash Course/chapter 9/e7.py | 1 | 1165 | class User():
def __init__(self, first_name, last_name, birthday, sex):
self.first_name = first_name
self.last_name = last_name
self.birthday = birthday
self.sex = sex
def describe_user(self):
print('first name: ' + self.first_name +
'\nlast name: ' + self.last_name +
'\nbirthday: ' + self.birthday +
'\nsex: ' + self.sex)
def greet_user(self):
print('\nhey ' + self.first_name + ', \nhow are you?\n')
class Admin(User):
def __init__(
self,
first_name,
last_name,
birthday,
sex,
privileges=['can add post', 'can delete post', 'can ban user']):
super().__init__(first_name, last_name, birthday, sex)
self.privileges = privileges
def show_privileges(self):
print('privileges: ' + str(self.privileges))
steve = Admin('Steve', 'Mcqueen', '18-04-2000', 'male')
steve.greet_user()
steve.show_privileges()
bob = Admin(
'Bob',
'the Rob',
'23-12-1999',
'male',
['can add post', 'can delete post'])
bob.greet_user()
bob.show_privileges()
| mit | 687,815,997,546,875,500 | 24.326087 | 76 | 0.537339 | false | 3.396501 | false | false | false |
hrabcak/jsbsim | tests/TestEngineIndexedProps.py | 3 | 5832 | # TestEngineIndexedProps.py
#
# Check that indexed properties (where the engine number is replaced by '#') in
# engine XML definitions are working.
#
# Copyright (c) 2016 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import shutil
import xml.etree.ElementTree as et
from JSBSim_utils import JSBSimTestCase, CreateFDM, RunTest
class TestEngineIndexedProps(JSBSimTestCase):
def testEnginePowerVC(self):
fdm = CreateFDM(self.sandbox)
fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
'L4102.xml'))
fdm.run_ic()
pm = fdm.get_property_manager()
self.assertTrue(pm.hasNode('propulsion/engine[0]/EnginePowerVC'))
self.assertTrue(pm.hasNode('propulsion/engine[1]/EnginePowerVC'))
while fdm.run():
self.assertAlmostEqual(fdm['propulsion/engine[0]/EnginePowerVC'],
fdm['propulsion/engine[1]/EnginePowerVC'])
def testFunctionWithIndexedProps(self):
tree = et.parse(self.sandbox.path_to_jsbsim_file('engine',
'eng_PegasusXc.xml'))
# Define the function starter-max-power-W as a 'post' function
root = tree.getroot()
startPowFunc_tag = root.find("function/[@name='propulsion/engine[#]/starter-max-power-W']")
startPowFunc_tag.attrib['type']='post'
tree.write('eng_PegasusXc.xml')
# Copy the propeller file.
shutil.copy(self.sandbox.path_to_jsbsim_file('engine', 'prop_deHavilland5000.xml'),
'.')
fdm = CreateFDM(self.sandbox)
fdm.set_engine_path('.')
fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
'Short_S23_1.xml'))
fdm.run_ic()
pm = fdm.get_property_manager()
self.assertTrue(pm.hasNode('propulsion/engine[0]/starter-max-power-W'))
self.assertTrue(pm.hasNode('propulsion/engine[1]/starter-max-power-W'))
self.assertTrue(pm.hasNode('propulsion/engine[2]/starter-max-power-W'))
self.assertTrue(pm.hasNode('propulsion/engine[3]/starter-max-power-W'))
while fdm.run():
rpm = [fdm['propulsion/engine[0]/engine-rpm'],
fdm['propulsion/engine[1]/engine-rpm'],
fdm['propulsion/engine[2]/engine-rpm'],
fdm['propulsion/engine[3]/engine-rpm']]
for i in xrange(4):
maxPower = max(0.0, 1.0-rpm[i]/400)*498.941*0.10471976*rpm[i]
self.assertAlmostEqual(fdm['propulsion/engine[%d]/starter-max-power-W' % (i,)],
maxPower)
def testTableWithIndexedVars(self):
tree = et.parse(self.sandbox.path_to_jsbsim_file('engine',
'eng_PegasusXc.xml'))
# Define the function starter-max-power-W as a 'post' function
root = tree.getroot()
startPowFunc_tag = root.find("function/[@name='propulsion/engine[#]/starter-max-power-W']")
startPowFunc_tag.attrib['type']='post'
max_tag = startPowFunc_tag.find('product/max')
diff_tag = max_tag.find('difference')
max_tag.remove(diff_tag)
table_tag = et.SubElement(max_tag,'table')
table_tag.attrib['name']='propulsion/engine[#]/starter-tabular-data'
indepVar_tag = et.SubElement(table_tag, 'independentVar')
indepVar_tag.attrib['lookup']='row'
indepVar_tag.text = 'propulsion/engine[#]/engine-rpm'
tData_tag = et.SubElement(table_tag, 'tableData')
tData_tag.text ='0.0 1.0\n400.0 0.0'
tree.write('eng_PegasusXc.xml')
# Copy the propeller file.
shutil.copy(self.sandbox.path_to_jsbsim_file('engine', 'prop_deHavilland5000.xml'),
'.')
fdm = CreateFDM(self.sandbox)
fdm.set_engine_path('.')
fdm.load_script(self.sandbox.path_to_jsbsim_file('scripts',
'Short_S23_1.xml'))
fdm.run_ic()
pm = fdm.get_property_manager()
self.assertTrue(pm.hasNode('propulsion/engine[0]/starter-max-power-W'))
self.assertTrue(pm.hasNode('propulsion/engine[1]/starter-max-power-W'))
self.assertTrue(pm.hasNode('propulsion/engine[2]/starter-max-power-W'))
self.assertTrue(pm.hasNode('propulsion/engine[3]/starter-max-power-W'))
while fdm.run():
rpm = [fdm['propulsion/engine[0]/engine-rpm'],
fdm['propulsion/engine[1]/engine-rpm'],
fdm['propulsion/engine[2]/engine-rpm'],
fdm['propulsion/engine[3]/engine-rpm']]
for i in xrange(4):
tabularData = max(0.0, 1.0-rpm[i]/400)
maxPower = tabularData*498.941*0.10471976*rpm[i]
self.assertAlmostEqual(fdm['propulsion/engine[%d]/starter-max-power-W' % (i,)],
maxPower)
self.assertAlmostEqual(fdm['propulsion/engine[%d]/starter-tabular-data' % (i,)],
tabularData)
RunTest(TestEngineIndexedProps)
| lgpl-2.1 | 4,369,971,170,960,301,600 | 48.008403 | 99 | 0.597394 | false | 3.530266 | true | false | false |
ennoborg/gramps | gramps/plugins/gramplet/persondetails.py | 1 | 11025 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Nick Hall
# Copyright (C) 2013 Heinz Brinker <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Gtk modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import Pango
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import EventType, EventRoleType
from gramps.gen.plug import Gramplet
from gramps.gui.widgets import Photo
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.display.place import displayer as place_displayer
from gramps.gen.datehandler import get_date
from gramps.gen.utils.file import media_path_full
from gramps.gen.const import COLON, GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
class PersonDetails(Gramplet):
"""
Displays details for a person.
"""
def init(self):
self.gui.WIDGET = self.build_gui()
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add(self.gui.WIDGET)
self.uistate.connect('nameformat-changed', self.update)
def build_gui(self):
"""
Build the GUI interface.
"""
self.top = Gtk.Box()
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.photo = Photo(self.uistate.screen_height() < 1000)
self.photo.show()
self.name = Gtk.Label(halign=Gtk.Align.START)
self.name.override_font(Pango.FontDescription('sans bold 12'))
vbox.pack_start(self.name, fill=True, expand=False, padding=7)
self.grid = Gtk.Grid(orientation=Gtk.Orientation.VERTICAL)
self.grid.set_column_spacing(10)
vbox.pack_start(self.grid, fill=True, expand=False, padding=5)
vbox.show_all()
self.top.pack_start(self.photo, fill=True, expand=False, padding=5)
self.top.pack_start(vbox, fill=True, expand=True, padding=10)
return self.top
def add_row(self, title, value):
"""
Add a row to the table.
"""
label = Gtk.Label(label=title + COLON, halign=Gtk.Align.END,
valign=Gtk.Align.START)
label.show()
value = Gtk.Label(label=value, halign=Gtk.Align.START)
value.show()
self.grid.add(label)
self.grid.attach_next_to(value, label, Gtk.PositionType.RIGHT, 1, 1)
def clear_grid(self):
"""
Remove all the rows from the grid.
"""
list(map(self.grid.remove, self.grid.get_children()))
def db_changed(self):
self.dbstate.db.connect('person-update', self.update)
def active_changed(self, handle):
self.update()
def update_has_data(self):
"""
Determine if a person has_data by checking:
1. has a birth, baptism, death, or burial event; OR
2. has a father; OR
3. has a mother
"""
active_handle = self.get_active('Person')
has_data = False
if active_handle:
active_person = self.dbstate.db.get_person_from_handle(active_handle)
if active_person:
for event_type in [EventType(EventType.BIRTH),
EventType(EventType.BAPTISM),
EventType(EventType.DEATH),
EventType(EventType.BURIAL)]:
event = self.get_event(active_person, event_type)
if event:
has_data = True
break
if not has_data:
family_handle = active_person.get_main_parents_family_handle()
if family_handle:
family = self.dbstate.db.get_family_from_handle(family_handle)
handle = family.get_father_handle()
if handle:
if self.dbstate.db.get_person_from_handle(handle):
has_data = True
else:
handle = family.get_mother_handle()
if handle:
if self.dbstate.db.get_person_from_handle(handle):
has_data = True
self.set_has_data(has_data)
def main(self): # return false finishes
self.display_empty()
active_handle = self.get_active('Person')
if active_handle:
active_person = self.dbstate.db.get_person_from_handle(active_handle)
self.top.hide()
if active_person:
self.display_person(active_person)
self.top.show()
self.update_has_data()
def display_person(self, active_person):
"""
Display details of the active person.
"""
self.load_person_image(active_person)
self.name.set_text(name_displayer.display(active_person))
self.clear_grid()
self.display_alternate_names(active_person)
self.display_parents(active_person)
self.display_separator()
self.display_type(active_person, EventType(EventType.BIRTH))
self.display_type(active_person, EventType(EventType.BAPTISM))
self.display_type(active_person, EventType(EventType.DEATH))
self.display_type(active_person, EventType(EventType.BURIAL))
self.display_separator()
self.display_attribute(active_person, _('Occupation'))
self.display_attribute(active_person, _('Title'))
self.display_attribute(active_person, _('Religion'))
def display_empty(self):
"""
Display empty details when no person is selected.
"""
self.photo.set_image(None)
self.photo.set_uistate(None, None)
self.name.set_text(_('No active person'))
self.clear_grid()
def display_separator(self):
"""
Display an empty row to separate groups of entries.
"""
label = Gtk.Label(label='')
label.override_font(Pango.FontDescription('sans 4'))
label.show()
self.grid.add(label)
def display_alternate_names(self, active_person):
"""
Display other names of the person
"""
try:
nlist = active_person.get_alternate_names()
if len(nlist) > 0:
for altname in nlist:
name_type = str(altname.get_type())
text = name_displayer.display_name(altname)
self.add_row(name_type, text)
self.display_separator()
except:
pass
def display_parents(self, active_person):
"""
Display the parents of the active person.
"""
family_handle = active_person.get_main_parents_family_handle()
if family_handle:
family = self.dbstate.db.get_family_from_handle(family_handle)
handle = family.get_father_handle()
if handle:
father = self.dbstate.db.get_person_from_handle(handle)
father_name = name_displayer.display(father)
else:
father_name = _('Unknown')
handle = family.get_mother_handle()
if handle:
mother = self.dbstate.db.get_person_from_handle(handle)
mother_name = name_displayer.display(mother)
else:
mother_name = _('Unknown')
else:
father_name = _('Unknown')
mother_name = _('Unknown')
self.add_row(_('Father'), father_name)
self.add_row(_('Mother'), mother_name)
def display_attribute(self, active_person, attr_key):
"""
Display an attribute row.
"""
values = []
for attr in active_person.get_attribute_list():
if attr.get_type() == attr_key:
values.append(attr.get_value())
if values:
self.add_row(attr_key, _(', ').join(values))
def display_type(self, active_person, event_type):
"""
Display an event type row.
"""
event = self.get_event(active_person, event_type)
if event:
self.add_row(str(event_type), self.format_event(event))
def get_event(self, person, event_type):
"""
Return an event of the given type.
"""
for event_ref in person.get_event_ref_list():
if int(event_ref.get_role()) == EventRoleType.PRIMARY:
event = self.dbstate.db.get_event_from_handle(event_ref.ref)
if event.get_type() == event_type:
return event
return None
def format_event(self, event):
"""
Format the event for display.
"""
date = get_date(event)
handle = event.get_place_handle()
if handle:
place = place_displayer.display_event(self.dbstate.db, event)
retval = _('%(date)s - %(place)s.') % {'date' : date,
'place' : place}
else:
retval = _('%(date)s.') % dict(date=date)
return retval
def load_person_image(self, person):
"""
Load the primary image if it exists.
"""
media_list = person.get_media_list()
if media_list:
media_ref = media_list[0]
object_handle = media_ref.get_reference_handle()
obj = self.dbstate.db.get_media_from_handle(object_handle)
full_path = media_path_full(self.dbstate.db, obj.get_path())
mime_type = obj.get_mime_type()
if mime_type and mime_type.startswith("image"):
self.photo.set_image(full_path, mime_type,
media_ref.get_rectangle())
self.photo.set_uistate(self.uistate, object_handle)
else:
self.photo.set_image(None)
self.photo.set_uistate(None, None)
else:
self.photo.set_image(None)
self.photo.set_uistate(None, None)
| gpl-2.0 | 8,619,502,784,011,851,000 | 37.548951 | 86 | 0.557642 | false | 4.032553 | false | false | false |
mscuthbert/abjad | abjad/tools/tonalanalysistools/test/test_tonalanalysistools_TonalAnalysisAgent_analyze_chords.py | 2 | 4233 | # -*- encoding: utf-8 -*-
from abjad import *
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_01():
r'''The three inversions of a C major triad.
'''
chord = Chord([0, 4, 7], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'major', 'triad', 'root')
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([4, 7, 12], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'major', 'triad', 1)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([7, 12, 16], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'major', 'triad', 2)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_02():
r'''The three inversions of an a minor triad.
'''
chord = Chord([9, 12, 16], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('a', 'minor', 'triad', 'root')
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([12, 16, 21], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('a', 'minor', 'triad', 1)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([16, 21, 24], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('a', 'minor', 'triad', 2)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_03():
r'''The four inversions of a C dominant seventh chord.
'''
chord = Chord([0, 4, 7, 10], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 7, 'root')
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([4, 7, 10, 12], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 7, 1)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([7, 10, 12, 16], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 7, 2)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([10, 12, 16, 19], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 7, 3)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_04():
r'''The five inversions of a C dominant ninth chord.
'''
chord = Chord([0, 4, 7, 10, 14], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 9, 'root')
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([4, 7, 10, 12, 14], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 9, 1)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([7, 10, 12, 14, 16], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 9, 2)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([10, 12, 14, 16, 19], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 9, 3)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
chord = Chord([2, 10, 12, 16, 19], (1, 4))
chord_class = tonalanalysistools.RootedChordClass('c', 'dominant', 9, 4)
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [chord_class]
def test_tonalanalysistools_TonalAnalysisAgent_analyze_chords_05():
r'''Returns none when chord does not analyze.
'''
chord = Chord('<c cs d>4')
selection = tonalanalysistools.select(chord)
assert selection.analyze_chords() == [None]
| gpl-3.0 | -6,901,460,588,497,738,000 | 38.943396 | 84 | 0.672573 | false | 2.899315 | false | false | false |
HMRecord/website | python/tests.py | 1 | 5702 | import os
import main
import unittest
import tempfile
from bson.json_util import loads, dumps
from pymongo import MongoClient
import base64
import copy
INVALID_ARTICLE = {"title": "B", "content": "afds", "sectionID": "23", "staffIDs": ["69"], "date": "Blah"}
STAFF = {"name": "Michael Truell", "position": "CTO"}
SECTION = {"title": "Sports"}
CORRECT_USERNAME = "admin"
CORRECT_PASSWORD = "d"
def getValidArticle(db):
staffs = [a for a in db.staff.find(STAFF)]
sections = [a for a in db.section.find(SECTION)]
if len(staffs) == 0:
db.staff.insert_one(copy.deepcopy(STAFF))
staffs = [a for a in db.staff.find(STAFF)]
if len(sections) == 0:
db.section.insert_one(copy.deepcopy(SECTION))
sections = [a for a in db.section.find(SECTION)]
sectionID = sections[0]['_id']
staffID = staffs[0]['_id']
return {"title": "Article Title", "content": "Article content goes here.", "date": "May 28, 2016", "sectionID": sectionID, "staffIDs": [staffID]}
def getAuthHeader(username, password):
return {"Authorization": "Basic "+base64.b64encode((username+":"+password).encode("utf-8")).decode("utf-8")}
class APITester(unittest.TestCase):
def setUp(self):
client = MongoClient()
client.drop_database("testRecord")
self.db = client.testRecord
main.initDB(self.db)
self.db_fd, main.app.config['DATABASE'] = tempfile.mkstemp()
main.app.config['TESTING'] = True
self.app = main.app.test_client()
def tearDown(self):
os.close(self.db_fd)
os.unlink(main.app.config['DATABASE'])
def queryGET(self, endpointName, data={}):
request = self.app.get(endpointName, data=data)
return request.data.decode("utf-8")
def queryPOST(self, endpointName, data={}):
header = getAuthHeader(CORRECT_USERNAME, CORRECT_PASSWORD)
contentType = 'application/json'
request = self.app.post(endpointName, data=dumps(data), content_type=contentType, headers=header)
return request.data.decode("utf-8")
def testEmptyDB(self):
endpoints = ['article', 'staff', 'section']
for endpoint in endpoints:
assert '[]' in str(self.app.get('/api/'+endpoint).data)
def testGETInvalidArticle(self):
self.db.article.insert_one(copy.deepcopy(INVALID_ARTICLE))
assert '[]' == self.queryGET('/api/article')
def testGETValidArticle(self):
def isSameAricle(article1, article2):
for field in list(article1.keys())+list(article2.keys()):
if field not in ['_id', 'section', 'staffs']:
if article1[field] != article2[field]:
return False
return True
validArticle = getValidArticle(self.db)
self.db.article.insert_one(validArticle)
returnedArticle = loads(self.queryGET('/api/article'))[0]
assert isSameAricle(validArticle, returnedArticle)
returnedArticle = loads(self.queryGET('/api/article', data={"sectionID": validArticle['sectionID']}))[0]
assert isSameAricle(validArticle, returnedArticle)
returnedArticle = loads(self.queryGET('/api/article', data={"title": validArticle['title']}))[0]
assert isSameAricle(validArticle, returnedArticle)
def testPOSTArticle(self):
# Should fail with bad object ids
try:
self.queryPOST("/api/admin/article", data=INVALID_ARTICLE)
assert False
except:
pass
# Should store data and return good when given valid article
assert self.queryPOST("/api/admin/article", data=getValidArticle(self.db)) == 'good'
assert self.db.article.find_one(getValidArticle(self.db)) is not None
def testGETStaff(self):
def isSameStaff(staff1, staff2):
for field in list(staff1.keys())+list(staff2.keys()):
if field != '_id':
if staff1[field] != staff2[field]:
return False
return True
modifiableStaff = copy.deepcopy(STAFF)
self.db.staff.insert_one(modifiableStaff)
assert isSameStaff(STAFF, loads(self.queryGET('/api/staff'))[0])
print("id")
print(self.queryGET('/api/staff', data={"staffID": str(modifiableStaff['_id'])}))
assert isSameStaff(STAFF, loads(self.queryGET('/api/staff', data={"staffID": str(modifiableStaff['_id'])}))[0])
assert isSameStaff(STAFF, loads(self.queryGET('/api/staff', data={"name": STAFF['name']}))[0])
def testPOSTStaff(self):
assert self.queryPOST("/api/admin/staff", data=STAFF) == 'good'
assert self.db.staff.find_one(STAFF) is not None
def testGETSection(self):
self.db.section.insert_one(copy.deepcopy(SECTION))
returnedSection = loads(self.queryGET('/api/section'))[0]
for field in list(returnedSection.keys())+list(SECTION.keys()):
if field != '_id':
assert SECTION[field] == returnedSection[field]
def testPOSTSection(self):
assert self.queryPOST("/api/admin/section", data=SECTION) == 'good'
assert self.db.section.find_one(SECTION) is not None
def testAdminAccess(self):
def request(username, password):
headers = getAuthHeader(username, password)
return self.app.post("/api/admin/article", headers=headers).data.decode("utf-8")
assert request(CORRECT_USERNAME, CORRECT_PASSWORD) == 'Bad request'
assert request(CORRECT_USERNAME, "wrong") == 'Unauthorized access'
assert request("wrong", CORRECT_PASSWORD) == 'Unauthorized access'
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 3,801,653,387,325,009,000 | 37.527027 | 149 | 0.631182 | false | 3.721932 | true | false | false |
openmotics/gateway | src/gateway/mappers/pulse_counter.py | 1 | 2003 | # Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
PulseCounter Mapper
"""
from __future__ import absolute_import
from gateway.dto import PulseCounterDTO
from gateway.models import PulseCounter
if False: # MYPY
from typing import List
class PulseCounterMapper(object):
@staticmethod
def orm_to_dto(orm_object): # type: (PulseCounter) -> PulseCounterDTO
return PulseCounterDTO(id=orm_object.number,
name=orm_object.name,
persistent=orm_object.persistent,
room=None if orm_object.room is None else orm_object.room.number)
@staticmethod
def dto_to_orm(pulse_counter_dto): # type: (PulseCounterDTO) -> PulseCounter
pulse_counter = PulseCounter.get_or_none(number=pulse_counter_dto.id)
if pulse_counter is None:
pulse_counter = PulseCounter(number=pulse_counter_dto.id,
name='',
source='gateway',
persistent=False)
if 'name' in pulse_counter_dto.loaded_fields:
pulse_counter.name = pulse_counter_dto.name
if 'persistent' in pulse_counter_dto.loaded_fields:
pulse_counter.persistent = pulse_counter_dto.persistent
return pulse_counter
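# Illustrative round trip (assumes a PulseCounter row with number=1 exists):
#   dto = PulseCounterMapper.orm_to_dto(PulseCounter.get_or_none(number=1))
#   pulse_counter = PulseCounterMapper.dto_to_orm(dto)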
| agpl-3.0 | -6,871,252,857,676,801,000 | 40.729167 | 96 | 0.652521 | false | 4.104508 | false | false | false |
AndreLesa/simplesmsforms | simplesmsforms/smsform_fields.py | 1 | 5350 | import datetime
import re
from smsform_exceptions import (SMSFieldException, ChoiceException, InvalidDateException,
MissingRequiredFieldException)
from smsform_validators import multiple_choice_validator, single_choice_validator
# SMS FIELD
class GenericSMSField(object):
empty_values = [None, [], ""]
def __init__(self, name, *args, **kwargs):
self.name = name
self.validators = kwargs.get('validators') or []
self.prefixes = kwargs.get("prefixes") or [""]
self.prefixes.sort(key=len, reverse=True)#Longest prefix should come first
self.accepted_prefix = ""
required = kwargs.get("required", "blank")
if required == "blank":
self.required = True
else:
self.required = False
def get_field_regex(self):
"""Return a dict of 'prefix':prefix and regex:regex"""
prefix_regexes = []
for prefix in self.prefixes:
prefix_regex = r"\b{prefix}(?P<{name}>\w*)".format(
prefix=prefix,
name=self.name
)
prefix_regexes.append({"prefix": prefix, "regex": prefix_regex})
return prefix_regexes
def get_verbose_name(self):
name_parts = self.name.split("_")
return " ".join(name_parts).title()
def to_python(self, text, accepted_prefix=""):
"""Convert the passed in text to a valid python object, any special
conversions from the passed in text to a valid python object should
happen here."""
self.accepted_prefix = self.accepted_prefix or accepted_prefix
return text, accepted_prefix
def validate(self, value):
# check to see if the field is required and present
if self.required and value in self.empty_values:
raise MissingRequiredFieldException(self.get_verbose_name())
for validator in self.validators:
try:
validator(value=value)
except SMSFieldException, e:
raise
return True
def process_field(self, text, accepted_prefix=""):
# Try to split into text and the accepted prefix
python_obj, accepted_prefix = self.to_python(text, accepted_prefix)
self.validate(python_obj)
return python_obj
def __repr__(self):
return "<{name}> object".format(name=self.name)
# SMSFields
class PrefixField(GenericSMSField):
"""This field is for the special fields that have a first letter followed by
the actual data. This class just strips out that first letter"""
pass
class MultiChoiceField(GenericSMSField):
def __init__(self, choices, choice_divider=",", *args, **kwargs):
self.choice_divider = choice_divider
self.choices = choices
super(MultiChoiceField, self).__init__(*args, **kwargs)
self.validators.append(multiple_choice_validator)
def to_python(self, text, accepted_prefix):
text, accepted_prefix = super(
MultiChoiceField, self).to_python(text, accepted_prefix)
return text.split(self.choice_divider), accepted_prefix
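# e.g. with the default "," divider, "red,blue" becomes (["red", "blue"], accepted_prefix)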
def get_field_regex(self):
choices_string = "|".join(self.choices)
return [
{
"prefix": "", "regex": "({choices_string})".format(choices_string=choices_string)
}
]
def validate(self, value):
# check to see if the field is required and present
if self.required and value in self.empty_values:
raise MissingRequiredFieldException(self.get_verbose_name())
for validator in self.validators:
try:
validator(value=value, choices=self.choices)
except SMSFieldException, e:
raise
return True
class SingleChoiceField(MultiChoiceField):
def __init__(self, choices, *args, **kwargs):
super(SingleChoiceField, self).__init__(choices, *args, **kwargs)
self.validators = [single_choice_validator]
class DateField(GenericSMSField):
def __init__(self, name, *args, **kwargs):
date_formats = kwargs.get("date_formats", None) or ["%d/%b/%y", "%d%b%y"]
super(DateField, self).__init__(name, *args, **kwargs)
self.date_formats = date_formats
def get_field_regex(self):
"""We will accept 2 formats for the dates: dayMonthYear, day/Month/Year
with the month acceptable as a word or digits
"""
regex_strings = [
r"\b\d{1,2}[-/]\d{1,2}[-/]\d{1,4}\b",
r"\b\d{1,2}[a-z]{3,14}\d{1,4}\b",
]
return [
{
"prefix": "", "regex": "{regex_strings}".format(regex_strings="|".join(regex_strings), name=self.name)
}
]
def to_python(self, date_string, accepted_prefix=""):
python_date = None
for date_format in self.date_formats:
try:
python_date = datetime.datetime.strptime(
date_string, date_format)
except ValueError:
continue
else:
break
if not python_date:
raise InvalidDateException(
"Date not recognized, please use the format: dayMonthYear"
)
return python_date.date(), accepted_prefix
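# Illustrative usage (the field name is hypothetical):
#   field = DateField('birth_date')
#   field.process_field('12/Jan/15')  # -> datetime.date(2015, 1, 12)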
| mit | 5,258,480,903,507,007,000 | 32.229814 | 118 | 0.593458 | false | 4.202671 | false | false | false |
purpleidea/puppet-cobbler | files/loaders/get-loaders.py | 1 | 1375 | #!/usr/bin/python
# get the loaders ahead of time so that cobbler can run in an isolated network!
# NOTE: see cobbler/action_dlcontent.py for the list of files it looks for...
import os
import sys
import urlgrabber
force = True
content_server = 'http://cobbler.github.com/loaders'
#dest = '/var/lib/cobbler/loaders'
dest = os.getcwd()
files = (
("%s/README" % content_server, "%s/README" % dest),
("%s/COPYING.elilo" % content_server, "%s/COPYING.elilo" % dest),
("%s/COPYING.yaboot" % content_server, "%s/COPYING.yaboot" % dest),
("%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest),
("%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest),
("%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest),
("%s/pxelinux.0-3.61" % content_server, "%s/pxelinux.0" % dest),
("%s/menu.c32-3.61" % content_server, "%s/menu.c32" % dest),
("%s/grub-0.97-x86.efi" % content_server, "%s/grub-x86.efi" % dest),
("%s/grub-0.97-x86_64.efi" % content_server, "%s/grub-x86_64.efi" % dest),
)
print "Script will download to: %s from: %s" % (dest, content_server)
try:
raw_input('<ENTER>/^C ?')
except KeyboardInterrupt, e:
sys.exit(1)
for src, dst in files:
if os.path.exists(dst) and not force:
print "File: %s already exists." % dst
continue
print "Downloading: %s to: %s" % (src, dst)
urlgrabber.grabber.urlgrab(src, filename=dst)
| agpl-3.0 | 5,363,835,476,370,518,000 | 34.25641 | 79 | 0.660364 | false | 2.45975 | false | false | false |
enochd/RMG-Py | rmgpy/data/solvation.py | 1 | 36641 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
"""
import os.path
import math
import logging
from copy import deepcopy
from base import Database, Entry, makeLogicNode, DatabaseError
from rmgpy.molecule import Molecule, Atom, Bond, Group, atomTypes
################################################################################
def saveEntry(f, entry):
"""
Write a Pythonic string representation of the given `entry` in the solvation
database to the file object `f`.
"""
f.write('entry(\n')
f.write(' index = {0:d},\n'.format(entry.index))
f.write(' label = "{0}",\n'.format(entry.label))
if isinstance(entry.item, Molecule):
if Molecule(SMILES=entry.item.toSMILES()).isIsomorphic(entry.item):
# The SMILES representation accurately describes the molecule, so we can save it that way.
f.write(' molecule = "{0}",\n'.format(entry.item.toSMILES()))
else:
f.write(' molecule = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList(removeH=False))
f.write('""",\n')
elif isinstance(entry.item, Group):
f.write(' group = \n')
f.write('"""\n')
f.write(entry.item.toAdjacencyList())
f.write('""",\n')
elif entry.item is not None:
f.write(' group = "{0}",\n'.format(entry.item))
if isinstance(entry.data, SoluteData):
f.write(' solute = SoluteData(\n')
f.write(' S = {0!r},\n'.format(entry.data.S))
f.write(' B = {0!r},\n'.format(entry.data.B))
f.write(' E = {0!r},\n'.format(entry.data.E))
f.write(' L = {0!r},\n'.format(entry.data.L))
f.write(' A = {0!r},\n'.format(entry.data.A))
if entry.data.V is not None: f.write(' V = {0!r},\n'.format(entry.data.V))
f.write(' ),\n')
elif isinstance(entry.data, SolventData):
f.write(' solvent = SolventData(\n')
f.write(' s_g = {0!r},\n'.format(entry.data.s_g))
f.write(' b_g = {0!r},\n'.format(entry.data.b_g))
f.write(' e_g = {0!r},\n'.format(entry.data.e_g))
f.write(' l_g = {0!r},\n'.format(entry.data.l_g))
f.write(' a_g = {0!r},\n'.format(entry.data.a_g))
f.write(' c_g = {0!r},\n'.format(entry.data.c_g))
f.write(' s_h = {0!r},\n'.format(entry.data.s_h))
f.write(' b_h = {0!r},\n'.format(entry.data.b_h))
f.write(' e_h = {0!r},\n'.format(entry.data.e_h))
f.write(' l_h = {0!r},\n'.format(entry.data.l_h))
f.write(' a_h = {0!r},\n'.format(entry.data.a_h))
f.write(' c_h = {0!r},\n'.format(entry.data.c_h))
f.write(' A = {0!r},\n'.format(entry.data.A))
f.write(' B = {0!r},\n'.format(entry.data.B))
f.write(' C = {0!r},\n'.format(entry.data.C))
f.write(' D = {0!r},\n'.format(entry.data.D))
f.write(' E = {0!r},\n'.format(entry.data.E))
f.write(' alpha = {0!r},\n'.format(entry.data.alpha))
f.write(' beta = {0!r},\n'.format(entry.data.beta))
f.write(' eps = {0!r},\n'.format(entry.data.eps))
f.write(' ),\n')
elif entry.data is None:
f.write(' solute = None,\n')
else:
raise DatabaseError("Not sure how to save {0!r}".format(entry.data))
f.write(' shortDesc = u"""')
try:
f.write(entry.shortDesc.encode('utf-8'))
except:
f.write(entry.shortDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
f.write(' longDesc = \n')
f.write('u"""\n')
try:
f.write(entry.longDesc.strip().encode('utf-8') + "\n")
except:
f.write(entry.longDesc.strip().encode('ascii', 'ignore')+ "\n")
f.write('""",\n')
f.write(')\n\n')
def generateOldLibraryEntry(data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
raise NotImplementedError()
def processOldLibraryEntry(data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
raise NotImplementedError()
class SolventData():
"""
Stores Abraham/Mintz parameters for characterizing a solvent.
"""
def __init__(self, s_h=None, b_h=None, e_h=None, l_h=None, a_h=None,
c_h=None, s_g=None, b_g=None, e_g=None, l_g=None, a_g=None, c_g=None, A=None, B=None,
C=None, D=None, E=None, alpha=None, beta=None, eps=None):
self.s_h = s_h
self.b_h = b_h
self.e_h = e_h
self.l_h = l_h
self.a_h = a_h
self.c_h = c_h
self.s_g = s_g
self.b_g = b_g
self.e_g = e_g
self.l_g = l_g
self.a_g = a_g
self.c_g = c_g
# These are parameters for calculating viscosity
self.A = A
self.B = B
self.C = C
self.D = D
self.E = E
# These are SOLUTE parameters used for intrinsic rate correction in H-abstraction rxns
self.alpha = alpha
self.beta = beta
# This is the dielectric constant
self.eps = eps
def getHAbsCorrection(self):
"""
If solvation is on, this will give the log10 of the ratio of the intrinsic rate
constants log10(k_sol/k_gas) for H-abstraction rxns
"""
return -8.3*self.alpha*self.beta
def getSolventViscosity(self, T):
"""
Returns the viscosity in Pa s, according to correlation in Perry's Handbook
and coefficients in DIPPR
"""
return math.exp(self.A + (self.B / T) + (self.C*math.log(T)) + (self.D * (T**self.E)))
class SolvationCorrection():
"""
Stores corrections for enthalpy, entropy, and Gibbs free energy when a species is solvated.
Enthalpy and Gibbs free energy is in J/mol; entropy is in J/mol/K
"""
def __init__(self, enthalpy=None, gibbs=None, entropy=None):
self.enthalpy = enthalpy
self.entropy = entropy
self.gibbs = gibbs
class SoluteData():
"""
Stores Abraham parameters to characterize a solute
"""
def __init__(self, S=None, B=None, E=None, L=None, A=None, V=None, comment=""):
self.S = S
self.B = B
self.E = E
self.L = L
self.A = A
self.V = V
self.comment = comment
def __repr__(self):
return "SoluteData(S={0},B={1},E={2},L={3},A={4},comment={5!r})".format(self.S, self.B, self.E, self.L, self.A, self.comment)
def getStokesDiffusivity(self, T, solventViscosity):
"""
Get diffusivity of solute using the Stokes-Einstein sphere relation. Radius is
found from the McGowan volume.
"""
k_b = 1.3806488e-23 # m2*kg/s2/K
radius = math.pow((75*self.V/3.14159),(1.0/3.0))/100 # in meters
D = k_b*T/6/3.14159/solventViscosity/radius # m2/s
return D
def setMcGowanVolume(self, species):
"""
Find and store the McGowan's Volume
Returned volumes are in cm^3/mol/100 (see note below)
See Table 2 in Abraham & McGowan, Chromatographia Vol. 23, No. 4, p. 243. April 1987
doi: 10.1007/BF02311772
"V is scaled to have similar values to the other
descriptors by division by 100 and has units of (cm3mol−1/100)."
the contributions in this function are in cm3/mol, and the division by 100 is done at the very end.
"""
molecule = species.molecule[0] # any will do, use the first.
Vtot = 0
for atom in molecule.atoms:
thisV = 0.0
if atom.isCarbon():
thisV = 16.35
elif (atom.element.number == 7): # nitrogen, do this way if we don't have an isElement method
thisV = 14.39
elif atom.isOxygen():
thisV = 12.43
elif atom.isHydrogen():
thisV = 8.71
elif (atom.element.number == 16):
thisV = 22.91
else:
raise Exception()
Vtot = Vtot + thisV
for bond in molecule.getBonds(atom):
# divide contribution in half since all bonds would be counted twice this way
Vtot = Vtot - 6.56/2
self.V = Vtot / 100  # division by 100 to get units correct.
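# Worked check: methane (CH4) gives 16.35 + 4*8.71 - 4*6.56 = 24.95 cm3/mol, i.e. V = 0.2495.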
################################################################################
################################################################################
class SolventLibrary(Database):
"""
A class for working with a RMG solvent library.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
solvent,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
):
self.entries[label] = Entry(
index = index,
label = label,
data = solvent,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
def load(self, path):
"""
Load the solvent library from the given path
"""
Database.load(self, path, local_context={'SolventData': SolventData}, global_context={})
def saveEntry(self, f, entry):
"""
Write the given `entry` in the solute database to the file object `f`.
"""
return saveEntry(f, entry)
def getSolventData(self, label):
"""
Get a solvent's data from its name
"""
return self.entries[label].data
class SoluteLibrary(Database):
"""
A class for working with a RMG solute library. Not currently used.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
molecule,
solute,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
):
try:
mol = Molecule(SMILES=molecule)
except:
try:
mol = Molecule().fromAdjacencyList(molecule)
except:
logging.error("Can't understand '{0}' in solute library '{1}'".format(molecule,self.name))
raise
self.entries[label] = Entry(
index = index,
label = label,
item = mol,
data = solute,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
def load(self, path):
"""
Load the solute library from the given path
"""
Database.load(self, path, local_context={'SoluteData': SoluteData}, global_context={})
def saveEntry(self, f, entry):
"""
Write the given `entry` in the solute database to the file object `f`.
"""
return saveEntry(f, entry)
def generateOldLibraryEntry(self, data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
return generateOldLibraryEntry(data)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return processOldLibraryEntry(data)
################################################################################
class SoluteGroups(Database):
"""
A class for working with an RMG solute group additivity database.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def loadEntry(self,
index,
label,
group,
solute,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
):
if group[0:3].upper() == 'OR{' or group[0:4].upper() == 'AND{' or group[0:7].upper() == 'NOT OR{' or group[0:8].upper() == 'NOT AND{':
item = makeLogicNode(group)
else:
item = Group().fromAdjacencyList(group)
self.entries[label] = Entry(
index = index,
label = label,
item = item,
data = solute,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
)
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
def generateOldLibraryEntry(self, data):
"""
Return a list of values used to save entries to the old-style RMG
thermo database based on the thermodynamics object `data`.
"""
return generateOldLibraryEntry(data)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding thermodynamics object.
"""
return processOldLibraryEntry(data)
################################################################################
class SolvationDatabase(object):
"""
A class for working with the RMG solvation database.
"""
def __init__(self):
self.libraries = {}
self.libraries['solvent'] = SolventLibrary()
self.libraries['solute'] = SoluteLibrary()
self.groups = {}
self.local_context = {
'SoluteData': SoluteData,
'SolventData': SolventData
}
self.global_context = {}
def __reduce__(self):
"""
A helper function used when pickling a SolvationDatabase object.
"""
d = {
'libraries': self.libraries,
'groups': self.groups,
'libraryOrder': self.libraryOrder,
}
return (SolvationDatabase, (), d)
def __setstate__(self, d):
"""
A helper function used when unpickling a SolvationDatabase object.
"""
self.libraries = d['libraries']
self.groups = d['groups']
self.libraryOrder = d['libraryOrder']
def load(self, path, libraries=None, depository=True):
"""
Load the solvation database from the given `path` on disk, where `path`
points to the top-level folder of the solvation database.
Load the solvent and solute libraries, then the solute groups.
"""
self.libraries['solvent'].load(os.path.join(path,'libraries','solvent.py'))
self.libraries['solute'].load(os.path.join(path,'libraries','solute.py'))
self.loadGroups(os.path.join(path, 'groups'))
def getSolventData(self, solvent_name):
try:
solventData = self.libraries['solvent'].getSolventData(solvent_name)
except:
raise DatabaseError('Solvent {0!r} not found in database'.format(solvent_name))
return solventData
def loadGroups(self, path):
"""
Load the solute database from the given `path` on disk, where `path`
points to the top-level folder of the solute database.
Three sets of groups for additivity, atom-centered ('abraham'), non atom-centered
('nonacentered'), and radical corrections ('radical')
"""
logging.info('Loading Platts additivity group database from {0}...'.format(path))
self.groups = {}
self.groups['abraham'] = SoluteGroups(label='abraham').load(os.path.join(path, 'abraham.py' ), self.local_context, self.global_context)
self.groups['nonacentered'] = SoluteGroups(label='nonacentered').load(os.path.join(path, 'nonacentered.py' ), self.local_context, self.global_context)
self.groups['radical'] = SoluteGroups(label='radical').load(os.path.join(path, 'radical.py' ), self.local_context, self.global_context)
def save(self, path):
"""
Save the solvation database to the given `path` on disk, where `path`
points to the top-level folder of the solvation database.
"""
path = os.path.abspath(path)
if not os.path.exists(path): os.mkdir(path)
self.saveLibraries(os.path.join(path, 'libraries'))
self.saveGroups(os.path.join(path, 'groups'))
def saveLibraries(self, path):
"""
Save the solute libraries to the given `path` on disk, where `path`
points to the top-level folder of the solute libraries.
"""
if not os.path.exists(path): os.mkdir(path)
for library in self.libraries.keys():
self.libraries[library].save(os.path.join(path, library+'.py'))
def saveGroups(self, path):
"""
Save the solute groups to the given `path` on disk, where `path`
points to the top-level folder of the solute groups.
"""
if not os.path.exists(path): os.mkdir(path)
for group in self.groups.keys():
self.groups[group].save(os.path.join(path, group+'.py'))
def loadOld(self, path):
"""
Load the old RMG solute database from the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
for (root, dirs, files) in os.walk(os.path.join(path, 'thermo_libraries')):
if os.path.exists(os.path.join(root, 'Dictionary.txt')) and os.path.exists(os.path.join(root, 'Library.txt')):
library = SoluteLibrary(label=os.path.basename(root), name=os.path.basename(root))
library.loadOld(
dictstr = os.path.join(root, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(root, 'Library.txt'),
numParameters = 5,
numLabels = 1,
pattern = False,
)
library.label = os.path.basename(root)
self.libraries[library.label] = library
self.groups = {}
self.groups['abraham'] = SoluteGroups(label='abraham', name='Platts Group Additivity Values for Abraham Solute Descriptors').loadOld(
dictstr = os.path.join(path, 'thermo_groups', 'Abraham_Dictionary.txt'),
treestr = os.path.join(path, 'thermo_groups', 'Abraham_Tree.txt'),
libstr = os.path.join(path, 'thermo_groups', 'Abraham_Library.txt'),
numParameters = 5,
numLabels = 1,
pattern = True,
)
def saveOld(self, path):
"""
Save the old RMG Abraham database to the given `path` on disk, where
`path` points to the top-level folder of the old RMG database.
"""
# Depository not used in old database, so it is not saved
librariesPath = os.path.join(path, 'thermo_libraries')
if not os.path.exists(librariesPath): os.mkdir(librariesPath)
for library in self.libraries.values():
libraryPath = os.path.join(librariesPath, library.label)
if not os.path.exists(libraryPath): os.mkdir(libraryPath)
library.saveOld(
dictstr = os.path.join(libraryPath, 'Dictionary.txt'),
treestr = '',
libstr = os.path.join(libraryPath, 'Library.txt'),
)
groupsPath = os.path.join(path, 'thermo_groups')
if not os.path.exists(groupsPath): os.mkdir(groupsPath)
self.groups['abraham'].saveOld(
dictstr = os.path.join(groupsPath, 'Abraham_Dictionary.txt'),
treestr = os.path.join(groupsPath, 'Abraham_Tree.txt'),
libstr = os.path.join(groupsPath, 'Abraham_Library.txt'),
)
def getSoluteData(self, species):
"""
Return the solute descriptors for a given :class:`Species`
object `species`. This function first searches the loaded libraries
in order, returning the first match found, before falling back to
estimation via Platts group additivity.
"""
soluteData = None
# Check the library first
soluteData = self.getSoluteDataFromLibrary(species, self.libraries['solute'])
if soluteData is not None:
assert len(soluteData)==3, "soluteData should be a tuple (soluteData, library, entry)"
soluteData[0].comment += "Data from solute library"
soluteData = soluteData[0]
else:
# Solute not found in any loaded libraries, so estimate
soluteData = self.getSoluteDataFromGroups(species)
# No Platts group additivity for V, so set using atom sizes
soluteData.setMcGowanVolume(species)
# Return the resulting solute parameters S, B, E, L, A
return soluteData
def getAllSoluteData(self, species):
"""
Return all possible sets of Abraham solute descriptors for a given
:class:`Species` object `species`. The hits from the library come
first, then the group additivity estimate. This method is useful
for a generic search job. Right now, there should either be 1 or
2 sets of descriptors, depending on whether or not we have a
library entry.
"""
soluteDataList = []
# Data from solute library
data = self.getSoluteDataFromLibrary(species, self.libraries['solute'])
if data is not None:
assert len(data) == 3, "soluteData should be a tuple (soluteData, library, entry)"
data[0].comment += "Data from solute library"
soluteDataList.append(data)
# Estimate from group additivity
# Make it a tuple
data = (self.getSoluteDataFromGroups(species), None, None)
soluteDataList.append(data)
return soluteDataList
def getSoluteDataFromLibrary(self, species, library):
"""
Return the set of Abraham solute descriptors corresponding to a given
:class:`Species` object `species` from the specified solute
`library`. If `library` is a string, the list of libraries is searched
for a library with that name. If no match is found in that library,
``None`` is returned. If no corresponding library is found, a
:class:`DatabaseError` is raised.
"""
for label, entry in library.entries.iteritems():
for molecule in species.molecule:
if molecule.isIsomorphic(entry.item) and entry.data is not None:
return (deepcopy(entry.data), library, entry)
return None
def getSoluteDataFromGroups(self, species):
"""
Return the set of Abraham solute parameters corresponding to a given
:class:`Species` object `species` by estimation using the Platts group
additivity method. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
It averages (linearly) over the descriptors for each Molecule (resonance isomer)
in the Species.
"""
soluteData = SoluteData(0.0,0.0,0.0,0.0,0.0)
count = 0
comments = []
for molecule in species.molecule:
molecule.clearLabeledAtoms()
molecule.updateAtomTypes()
sdata = self.estimateSoluteViaGroupAdditivity(molecule)
soluteData.S += sdata.S
soluteData.B += sdata.B
soluteData.E += sdata.E
soluteData.L += sdata.L
soluteData.A += sdata.A
count += 1
comments.append(sdata.comment)
soluteData.S /= count
soluteData.B /= count
soluteData.E /= count
soluteData.L /= count
soluteData.A /= count
# Print groups that are used for debugging purposes
soluteData.comment = "Average of {0}".format(" and ".join(comments))
return soluteData
def transformLonePairs(self, molecule):
"""
Changes lone pairs in a molecule to two radicals for purposes of finding
solute data via group additivity. Transformed for each atom based on valency.
"""
saturatedStruct = molecule.copy(deep=True)
addedToPairs = {}
for atom in saturatedStruct.atoms:
addedToPairs[atom] = 0
if atom.lonePairs > 0:
charge = atom.charge # Record this so we can conserve it when checking
bonds = saturatedStruct.getBonds(atom)
sumBondOrders = 0
for key, bond in bonds.iteritems():
if bond.order == 'S': sumBondOrders += 1
if bond.order == 'D': sumBondOrders += 2
if bond.order == 'T': sumBondOrders += 3
if bond.order == 'B': sumBondOrders += 1.5 # We should always have 2 'B' bonds (but what about Cbf?)
if atomTypes['Val4'] in atom.atomType.generic: # Carbon, Silicon
while(atom.radicalElectrons + charge + sumBondOrders < 4):
atom.decrementLonePairs()
atom.incrementRadical()
atom.incrementRadical()
addedToPairs[atom] += 1
if atomTypes['Val5'] in atom.atomType.generic: # Nitrogen
while(atom.radicalElectrons + charge + sumBondOrders < 3):
atom.decrementLonePairs()
atom.incrementRadical()
atom.incrementRadical()
addedToPairs[atom] += 1
if atomTypes['Val6'] in atom.atomType.generic: # Oxygen, sulfur
while(atom.radicalElectrons + charge + sumBondOrders < 2):
atom.decrementLonePairs()
atom.incrementRadical()
atom.incrementRadical()
addedToPairs[atom] += 1
if atomTypes['Val7'] in atom.atomType.generic: # Chlorine
while(atom.radicalElectrons + charge + sumBondOrders < 1):
atom.decrementLonePairs()
atom.incrementRadical()
atom.incrementRadical()
addedToPairs[atom] += 1
saturatedStruct.updateConnectivityValues()
saturatedStruct.sortVertices()
saturatedStruct.updateAtomTypes()
saturatedStruct.updateLonePairs()
saturatedStruct.updateMultiplicity()
return saturatedStruct, addedToPairs
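# removeHBonding below undoes the saturation (removes the added H atoms, restores
# radicals and lone pairs) and applies the radical-group correction to the Abraham
# parameters for each remaining radical electron.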
def removeHBonding(self, saturatedStruct, addedToRadicals, addedToPairs, soluteData):
# Remove hydrogen bonds and restore the radical
for atom in addedToRadicals:
for H, bond in addedToRadicals[atom]:
saturatedStruct.removeBond(bond)
saturatedStruct.removeAtom(H)
atom.incrementRadical()
# Change transformed lone pairs back
for atom in addedToPairs:
if addedToPairs[atom] > 0:
for pair in range(1, addedToPairs[atom]):
saturatedStruct.decrementRadical()
saturatedStruct.decrementRadical()
saturatedStruct.incrementLonePairs()
# Update Abraham 'A' H-bonding parameter for unsaturated struct
for atom in saturatedStruct.atoms:
# Iterate over heavy (non-hydrogen) atoms
if atom.isNonHydrogen() and atom.radicalElectrons > 0:
for electron in range(1, atom.radicalElectrons):
# Get solute data for radical group
try:
self.__addGroupSoluteData(soluteData, self.groups['radical'], saturatedStruct, {'*':atom})
except KeyError: pass
return soluteData
def estimateSoluteViaGroupAdditivity(self, molecule):
"""
Return the set of Abraham solute parameters corresponding to a given
:class:`Molecule` object `molecule` by estimation using the Platts' group
additivity method. If no group additivity values are loaded, a
:class:`DatabaseError` is raised.
"""
# For thermo estimation we need the atoms to already be sorted because we
# iterate over them; if the order changes during the iteration then we
# will probably not visit the right atoms, and so will get the thermo wrong
molecule.sortVertices()
# Create the SoluteData object with the intercepts from the Platts groups
soluteData = SoluteData(
S = 0.277,
B = 0.071,
E = 0.248,
L = 0.13,
A = 0.003
)
addedToRadicals = {} # Dictionary of key = atom, value = dictionary of {H atom: bond}
addedToPairs = {} # Dictionary of key = atom, value = # lone pairs changed
saturatedStruct = molecule.copy(deep=True)
# Convert lone pairs to radicals, then saturate with H.
# Change lone pairs to radicals based on valency
if sum([atom.lonePairs for atom in saturatedStruct.atoms]) > 0: # molecule contains lone pairs
saturatedStruct, addedToPairs = self.transformLonePairs(saturatedStruct)
# Now saturate radicals with H
if sum([atom.radicalElectrons for atom in saturatedStruct.atoms]) > 0: # radical species
addedToRadicals = saturatedStruct.saturate()
# Saturated structure should now have no unpaired electrons, and only "expected" lone pairs
# based on the valency
for atom in saturatedStruct.atoms:
# Iterate over heavy (non-hydrogen) atoms
if atom.isNonHydrogen():
# Get initial solute data from main group database. Every atom must
# be found in the main abraham database
try:
self.__addGroupSoluteData(soluteData, self.groups['abraham'], saturatedStruct, {'*':atom})
except KeyError:
logging.error("Couldn't find in main abraham database:")
logging.error(saturatedStruct)
logging.error(saturatedStruct.toAdjacencyList())
raise
# Get solute data for non-atom centered groups (being found in this group
# database is optional)
try:
self.__addGroupSoluteData(soluteData, self.groups['nonacentered'], saturatedStruct, {'*':atom})
except KeyError: pass
soluteData = self.removeHBonding(saturatedStruct, addedToRadicals, addedToPairs, soluteData)
return soluteData
def __addGroupSoluteData(self, soluteData, database, molecule, atom):
"""
Determine the Platts group additivity solute data for the atom `atom`
in the structure `structure`, and add it to the existing solute data
`soluteData`.
"""
node0 = database.descendTree(molecule, atom, None)
if node0 is None:
raise KeyError('Node not found in database.')
# It's possible (and allowed) that items in the tree may not be in the
# library, in which case we need to fall up the tree until we find an
# ancestor that has an entry in the library
node = node0
while node is not None and node.data is None:
node = node.parent
if node is None:
raise KeyError('Node has no parent with data in database.')
data = node.data
comment = node.label
while isinstance(data, basestring) and data is not None:
for entry in database.entries.values():
if entry.label == data:
data = entry.data
comment = entry.label
break
comment = '{0}({1})'.format(database.label, comment)
# This code prints the hierarchy of the found node; useful for debugging
#result = ''
#while node is not None:
# result = ' -> ' + node + result
# node = database.tree.parent[node]
#print result[4:]
# Add solute data for each atom to the overall solute data for the molecule.
soluteData.S += data.S
soluteData.B += data.B
soluteData.E += data.E
soluteData.L += data.L
soluteData.A += data.A
soluteData.comment += comment + "+"
return soluteData
def calcH(self, soluteData, solventData):
"""
Returns the enthalpy of solvation, at 298K, in J/mol
"""
# Use Mintz parameters for solvents. Multiply by 1000 to go from kJ->J to maintain consistency
delH = 1000*((soluteData.S*solventData.s_h)+(soluteData.B*solventData.b_h)+(soluteData.E*solventData.e_h)+(soluteData.L*solventData.l_h)+(soluteData.A*solventData.a_h)+solventData.c_h)
return delH
def calcG(self, soluteData, solventData):
"""
Returns the Gibbs free energy of solvation, at 298K, in J/mol
"""
# Use Abraham parameters for solvents to get log K
logK = (soluteData.S*solventData.s_g)+(soluteData.B*solventData.b_g)+(soluteData.E*solventData.e_g)+(soluteData.L*solventData.l_g)+(soluteData.A*solventData.a_g)+solventData.c_g
# Convert to delG with units of J/mol
delG = -8.314*298*2.303*logK
return delG
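    # Note on the constant used in calcG above: delG = -R*T*ln(10)*logK with
    # R = 8.314 J/mol/K and T = 298 K, so -8.314*298*2.303 is roughly
    # -5706 J/mol per log10 unit of the partition coefficient.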
def calcS(self, delG, delH):
"""
Returns the entropy of solvation, at 298K, in J/mol/K
"""
delS = (delH-delG)/298
return delS
def getSolvationCorrection(self, soluteData, solventData):
"""
Given a soluteData and solventData object, calculates the enthalpy, entropy,
and Gibbs free energy of solvation at 298 K. Returns a SolvationCorrection
object
"""
correction = SolvationCorrection(0.0, 0.0, 0.0)
correction.enthalpy = self.calcH(soluteData, solventData)
correction.gibbs = self.calcG(soluteData, solventData)
correction.entropy = self.calcS(correction.gibbs, correction.enthalpy)
return correction
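    # Usage sketch (illustrative; `db` is a loaded solvation database instance and
    # `molecule`/`solventData` come from the surrounding RMG machinery, not shown here):
    #   soluteData = db.estimateSoluteViaGroupAdditivity(molecule)
    #   correction = db.getSolvationCorrection(soluteData, solventData)
    #   # correction.enthalpy and correction.gibbs are in J/mol; correction.entropy in J/mol/K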
| mit | 3,259,325,981,605,979,600 | 39.664817 | 194 | 0.567728 | false | 3.882895 | false | false | false |
theneverworks/Sam | tools/braincompiler.py | 1 | 2856 | ##########################################################################################
# Sam - Home and Office Automation SRAI
# (Sam Brain Compiler)
#
# Version 1.0
#
# Used to compile the brain file if needed.
#
# Credits:
# Many great works of many great people are included in Sam
# I have only stacked the legos together.
#
# Based on the Py-AIML or PyAIML or pyAIML interpreter currently cloned by creatorrr
# author: Cort Stratton ([email protected]) web: http://pyaiml.sourceforge.net/
# https://github.com/creatorrr/pyAIML
#
##########################################################################################
import os
import sys
import platform
import time
import aiml
import marshal
import glob
import time
import operator
import csv
# AIML Directory
saiml = "/PATH/sam/aiml/"
#saiml = "C:\\PATH\\sam\\aiml\\"
# brain
k = aiml.Kernel()
# setpreds() function
def setpreds():
with open(saiml + 'preds.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
#print((row[0]), (row[1]))
k.setBotPredicate((row[0]), (row[1]))
plat = platform.machine()
osys = os.name
print "Sam for " + osys
print "System Architecture " + plat
#print "Memory " + psutil.virtual_memory()
k.setBotPredicate("architecture", plat)
k.setBotPredicate("os", osys)
# get_oldest_file() function
def get_oldest_file(files, _invert=False):
""" Find and return the oldest file of input file names.
    Only one file wins a tie. Ages are measured as time distance from the present.
    Setting `_invert` inverts the logic to make this a youngest-file routine,
    intended to be used via a `get_youngest_file` wrapper.
"""
gt = operator.lt if _invert else operator.gt
# Check for empty list.
if not files:
return None
# Raw epoch distance.
now = time.time()
# Select first as arbitrary sentinel file, storing name and age.
oldest = files[0], now - os.path.getmtime(files[0])
# Iterate over all remaining files.
for f in files[1:]:
age = now - os.path.getmtime(f)
if gt(age, oldest[1]):
# Set new oldest.
oldest = f, age
# Return just the name of oldest file.
return oldest[0]
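# The docstring above mentions `get_youngest_file`; it is not defined in this
# file, but a minimal wrapper (illustrative sketch only) would be:
#   def get_youngest_file(files):
#       return get_oldest_file(files, _invert=True)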
# learn() function
def learn(aimlfiles):
if not aimlfiles:
k.learn(saiml + "xfind.aiml")
for f in aimlfiles[1:]:
k.learn(f)
# brain() function
def brain():
aimlfiles = glob.glob(saiml + "*.aiml")
learn(aimlfiles)
setpreds()
if os.path.isfile(saiml + "sam.ses"):
sessionFile = file(saiml + "sam.ses", "rb")
session = marshal.load(sessionFile)
sessionFile.close()
for pred,value in session.items():
k.setPredicate(pred, value, "sam")
else:
setpreds()
k.saveBrain(saiml + "sam.brn")
if __name__ == "__main__":
brain() | mit | 6,410,904,014,709,480,000 | 26.209524 | 90 | 0.595588 | false | 3.42446 | false | false | false |
RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/cli/misc.py | 1 | 2480 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import click
from rnacentral_pipeline.rnacentral import upi_ranges
from rnacentral_pipeline.databases.crs import parser as crs
from rnacentral_pipeline.rnacentral import pgloader
@click.command("upi-ranges")
@click.option("--db_url", envvar="PGDATABASE")
@click.option("--table-name", default="rna")
@click.argument("chunk_size", type=int)
@click.argument("output", default="-", type=click.File("w"))
def find_upi_ranges(chunk_size, output, db_url=None, table_name=None):
"""
    This will compute the ranges to use for each xml file in the search
    export. We want to do several chunks at once as it is faster (but not too
    many), and we want to have as large a chunk as possible. If given a
    table_name value it will use that table, otherwise it will use the rna
    table.
"""
upi_ranges.to_file(db_url, table_name, chunk_size, output)
@click.command("crs")
@click.argument("filename", default="-", type=click.File("r"))
@click.argument("output", default="complete_features.csv", type=click.File("w"))
def crs_data(filename, output):
"""
This will parse the CRS file to produce a series of sequence features for
import. The features are different from normal sequence features because
these are 'complete', they already have a URS/taxid assigned and can just
be inserted directly into the database.
"""
crs.from_file(filename, output)
@click.command("validate-pgloader")
@click.argument("filename", default="-", type=click.File("r"))
def validate_pgloader(filename):
"""
    Check if pgloader ran without errors. Pgloader doesn't seem to crash when it
    should, so we use this to parse the output and determine if there were any
    issues when loading. This is safer than continuing.
"""
if not pgloader.validate(filename):
raise click.ClickException("Pgloader produced errors")
| apache-2.0 | 3,810,839,930,345,933,000 | 38.365079 | 80 | 0.733871 | false | 3.746224 | false | false | false |
bierminen/ninkasi | app/queries.py | 1 | 2230 | #!env/bin/python
#queries.py
#
# Implements methods to answer users queries.
#
# Author: José Lopes de Oliveira Jr. <bierminen.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from flask import jsonify, make_response
from app.models import Style, Subtype, Type
def error_response(status):
if status == 404:
return make_response(jsonify({'status':'not found'}), status)
else:
return make_response(jsonify({'status':'server error ({})'.format(
status)}), status)
def style_response(status, num=-1, n='', st='', t='', ogi=-1, oga=-1,
fgi=-1, fga=-1, abi=-1, aba=-1, ibi=-1, iba=-1, sri=-1, sra=-1, d=''):
if status == 200:
return {'number':num, 'name':n, 'subtype':st, 'type':t,
'og_min':ogi, 'og_max':oga, 'fg_min':fgi, 'fg_max':fga,
'abv_min':abi, 'abv_max':aba, 'ibu_min':ibi, 'ibu_max':iba,
'srm_min':sri, 'srm_max':sra, 'description':d}
else:
return error_response(status)
def get_styles(n):
styles_list = []
if n: styles = Style.query.filter_by(number=n)
else: styles = Style.query.all()
for s in styles:
st = Subtype.query.filter_by(id=s.fk_subtype).first()
t = Type.query.filter_by(id=st.fk_type).first()
styles_list.append(style_response(200, s.number, s.name, st.name,
t.name, s.og_min, s.og_max, s.fg_min, s.fg_max,
s.abv_min, s.abv_max, s.ibu_min, s.ibu_max, s.srm_min, s.srm_max,
s.description))
if len(styles_list):
return jsonify({'status':'OK', 'styles':styles_list})
else:
return style_response(404)
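# Shape of a successful get_styles() response (field values are illustrative):
#   {"status": "OK",
#    "styles": [{"number": 1, "name": "...", "subtype": "...", "type": "...",
#                "og_min": 1.04, "og_max": 1.06, "fg_min": 1.01, "fg_max": 1.02,
#                "abv_min": 4.0, "abv_max": 5.5, "ibu_min": 20, "ibu_max": 40,
#                "srm_min": 4, "srm_max": 10, "description": "..."}]}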
| gpl-3.0 | -5,089,947,436,916,108,000 | 33.292308 | 78 | 0.633468 | false | 3.130618 | false | false | false |
nightflyer73/plugin.video.floptv | resources/lib/floptv.py | 1 | 2784 | import re
import urllib2
import json
from BeautifulSoup import BeautifulSoup
class FlopTV:
__USERAGENT = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0"
__BASEURL = "http://www.floptv.tv"
def __init__(self):
opener = urllib2.build_opener()
# Use Firefox User-Agent
opener.addheaders = [('User-Agent', self.__USERAGENT)]
urllib2.install_opener(opener)
def getShows(self):
pageUrl = "http://www.floptv.tv/show/"
data = urllib2.urlopen(pageUrl).read()
tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
shows = []
sections = tree.find("div", "all-shows").findAll("section")
for section in sections:
items = section.findAll("li")
for item in items:
show = {}
show["title"] = item.text
show["thumb"] = item.find("img")["src"]
show["pageUrl"] = self.__BASEURL + item.find("a")["href"]
shows.append(show)
return shows
def getVideoByShow(self, pageUrl):
data = urllib2.urlopen(pageUrl).read()
tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
videos = []
sections = tree.findAll("section", "tabella")
for section in sections:
items = section.find("tbody").findAll("tr")
for item in items:
video = {}
data = item.findAll("td")
video["title"] = data[0].text + " " + data[2].text
video["thumb"] = item.find("img")["src"].replace("-62x36.jpg", "-307x173.jpg")
video["pageUrl"] = self.__BASEURL + item.find("a")["href"]
videos.append(video)
return videos
def getVideoUrl(self, pageUrl):
# Parse the HTML page to get the Video URL
data = urllib2.urlopen(pageUrl).read()
tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
iframeUrl = tree.find("iframe", {"id": "player"})["src"]
req = urllib2.Request(iframeUrl)
req.add_header('Referer', pageUrl)
data = urllib2.urlopen(req).read()
tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
script = tree.find("script", text=re.compile("playerConfig"))
match = re.search(r'sources\s*:\s*(\[[^\]]+\])', script, re.DOTALL)
string = match.group(1)
# Convert to JSON
string = string.replace('file:','"file":')
sources = json.loads(string)
# Get the first (and better) stream available
videoUrl = sources[0]["file"]
return videoUrl
| gpl-3.0 | 4,364,514,896,675,708,400 | 36.133333 | 97 | 0.55819 | false | 3.965812 | false | false | false |
agentxan/nzbToMedia | libs/mutagen/mp4/__init__.py | 4 | 33798 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Read and write MPEG-4 audio files with iTunes metadata.
This module will read MPEG-4 audio information and metadata,
as found in Apple's MP4 (aka M4A, M4B, M4P) files.
There is no official specification for this format. The source code
for TagLib, FAAD, and various MPEG specifications at
* http://developer.apple.com/documentation/QuickTime/QTFF/
* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
c041828_ISO_IEC_14496-12_2005(E).zip
* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
were all consulted.
"""
import struct
import sys
from mutagen import FileType, Tags, StreamInfo, PaddingInfo
from mutagen._constants import GENRES
from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError,
hashable, enum, get_size, resize_bytes)
from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
iteritems, PY3, cBytesIO, izip, xrange)
from ._atom import Atoms, Atom, AtomError
from ._util import parse_full_atom
from ._as_entry import AudioSampleEntry, ASEntryError
class error(IOError, MutagenError):
pass
class MP4MetadataError(error):
pass
class MP4StreamInfoError(error):
pass
class MP4MetadataValueError(ValueError, MP4MetadataError):
pass
__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType']
@enum
class AtomDataType(object):
"""Enum for `dataformat` attribute of MP4FreeForm.
.. versionadded:: 1.25
"""
IMPLICIT = 0
"""for use with tags for which no type needs to be indicated because
only one type is allowed"""
UTF8 = 1
"""without any count or null terminator"""
UTF16 = 2
"""also known as UTF-16BE"""
SJIS = 3
"""deprecated unless it is needed for special Japanese characters"""
HTML = 6
"""the HTML file header specifies which HTML version"""
XML = 7
"""the XML header must identify the DTD or schemas"""
UUID = 8
"""also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
ISRC = 9
"""stored as UTF-8 text (valid as an ID)"""
MI3P = 10
"""stored as UTF-8 text (valid as an ID)"""
GIF = 12
"""(deprecated) a GIF image"""
JPEG = 13
"""a JPEG image"""
PNG = 14
"""PNG image"""
URL = 15
"""absolute, in UTF-8 characters"""
DURATION = 16
"""in milliseconds, 32-bit integer"""
DATETIME = 17
"""in UTC, counting seconds since midnight, January 1, 1904;
32 or 64-bits"""
GENRES = 18
"""a list of enumerated values"""
INTEGER = 21
"""a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
RIAA_PA = 24
"""RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
    8-bit integer"""
UPC = 25
"""Universal Product Code, in text UTF-8 format (valid as an ID)"""
BMP = 27
"""Windows bitmap image"""
@hashable
class MP4Cover(bytes):
"""A cover artwork.
Attributes:
* imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
"""
FORMAT_JPEG = AtomDataType.JPEG
FORMAT_PNG = AtomDataType.PNG
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, imageformat=FORMAT_JPEG):
self.imageformat = imageformat
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4Cover):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.imageformat == other.imageformat)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.imageformat))
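# Illustrative sketch (not from the original source): building a 'covr' value
# from a local file; "cover.jpg" is a hypothetical path and `audio` an opened MP4.
#   with open("cover.jpg", "rb") as h:
#       audio["covr"] = [MP4Cover(h.read(), imageformat=MP4Cover.FORMAT_JPEG)]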
@hashable
class MP4FreeForm(bytes):
"""A freeform value.
Attributes:
* dataformat -- format of the data (see AtomDataType)
"""
FORMAT_DATA = AtomDataType.IMPLICIT # deprecated
FORMAT_TEXT = AtomDataType.UTF8 # deprecated
def __new__(cls, data, *args, **kwargs):
return bytes.__new__(cls, data)
def __init__(self, data, dataformat=AtomDataType.UTF8, version=0):
self.dataformat = dataformat
self.version = version
__hash__ = bytes.__hash__
def __eq__(self, other):
if not isinstance(other, MP4FreeForm):
return bytes(self) == other
return (bytes(self) == bytes(other) and
self.dataformat == other.dataformat and
self.version == other.version)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%r, %r)" % (
type(self).__name__, bytes(self),
AtomDataType(self.dataformat))
def _name2key(name):
if PY2:
return name
return name.decode("latin-1")
def _key2name(key):
if PY2:
return key
return key.encode("latin-1")
def _find_padding(atom_path):
# Check for padding "free" atom
# XXX: we only use them if they are adjacent to ilst, and only one.
# and there also is a top level free atom which we could use maybe..?
meta, ilst = atom_path[-2:]
assert meta.name == b"meta" and ilst.name == b"ilst"
index = meta.children.index(ilst)
try:
prev = meta.children[index - 1]
if prev.name == b"free":
return prev
except IndexError:
pass
try:
next_ = meta.children[index + 1]
if next_.name == b"free":
return next_
except IndexError:
pass
def _item_sort_key(key, value):
# iTunes always writes the tags in order of "relevance", try
# to copy it as closely as possible.
order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
"\xa9gen", "gnre", "trkn", "disk",
"\xa9day", "cpil", "pgap", "pcst", "tmpo",
"\xa9too", "----", "covr", "\xa9lyr"]
order = dict(izip(order, xrange(len(order))))
last = len(order)
# If there's no key-based way to distinguish, order by length.
# If there's still no way, go by string comparison on the
# values, so we at least have something determinstic.
return (order.get(key[:4], last), len(repr(value)), repr(value))
class MP4Tags(DictProxy, Tags):
r"""Dictionary containing Apple iTunes metadata list key/values.
Keys are four byte identifiers, except for freeform ('----')
keys. Values are usually unicode strings, but some atoms have a
special structure:
Text values (multiple values per key are supported):
* '\\xa9nam' -- track title
* '\\xa9alb' -- album
* '\\xa9ART' -- artist
* 'aART' -- album artist
* '\\xa9wrt' -- composer
* '\\xa9day' -- year
* '\\xa9cmt' -- comment
* 'desc' -- description (usually used in podcasts)
* 'purd' -- purchase date
* '\\xa9grp' -- grouping
* '\\xa9gen' -- genre
* '\\xa9lyr' -- lyrics
* 'purl' -- podcast URL
* 'egid' -- podcast episode GUID
* 'catg' -- podcast category
* 'keyw' -- podcast keywords
* '\\xa9too' -- encoded by
* 'cprt' -- copyright
* 'soal' -- album sort order
* 'soaa' -- album artist sort order
* 'soar' -- artist sort order
* 'sonm' -- title sort order
* 'soco' -- composer sort order
* 'sosn' -- show sort order
* 'tvsh' -- show name
Boolean values:
* 'cpil' -- part of a compilation
* 'pgap' -- part of a gapless album
* 'pcst' -- podcast (iTunes reads this only on import)
Tuples of ints (multiple values per key are supported):
* 'trkn' -- track number, total tracks
* 'disk' -- disc number, total discs
Others:
* 'tmpo' -- tempo/BPM, 16 bit int
* 'covr' -- cover artwork, list of MP4Cover objects (which are
tagged strs)
* 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead.
The freeform '----' frames use a key in the format '----:mean:name'
where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique
identifier for this frame. The value is a str, but is probably
text that can be decoded as UTF-8. Multiple values per key are
supported.
MP4 tag data cannot exist outside of the structure of an MP4 file,
so this class should not be manually instantiated.
Unknown non-text tags and tags that failed to parse will be written
back as is.
"""
def __init__(self, *args, **kwargs):
self._failed_atoms = {}
super(MP4Tags, self).__init__()
if args or kwargs:
self.load(*args, **kwargs)
def load(self, atoms, fileobj):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError as key:
raise MP4MetadataError(key)
free = _find_padding(path)
self._padding = free.datalength if free is not None else 0
ilst = path[-1]
for atom in ilst.children:
ok, data = atom.read(fileobj)
if not ok:
raise MP4MetadataError("Not enough data")
try:
if atom.name in self.__atoms:
info = self.__atoms[atom.name]
info[0](self, atom, data)
else:
# unknown atom, try as text
self.__parse_text(atom, data, implicit=False)
except MP4MetadataError:
# parsing failed, save them so we can write them back
key = _name2key(atom.name)
self._failed_atoms.setdefault(key, []).append(data)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError("key has to be str")
self._render(key, value)
super(MP4Tags, self).__setitem__(key, value)
@classmethod
def _can_load(cls, atoms):
return b"moov.udta.meta.ilst" in atoms
def _render(self, key, value):
atom_name = _key2name(key)[:4]
if atom_name in self.__atoms:
render_func = self.__atoms[atom_name][1]
else:
render_func = type(self).__render_text
return render_func(self, key, value)
def save(self, filename, padding=None):
"""Save the metadata to the given filename."""
values = []
items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv))
for key, value in items:
try:
values.append(self._render(key, value))
except (TypeError, ValueError) as s:
reraise(MP4MetadataValueError, s, sys.exc_info()[2])
for key, failed in iteritems(self._failed_atoms):
# don't write atoms back if we have added a new one with
# the same name, this excludes freeform which can have
# multiple atoms with the same key (most parsers seem to be able
# to handle that)
if key in self:
assert _key2name(key) != b"----"
continue
for data in failed:
values.append(Atom.render(_key2name(key), data))
data = Atom.render(b"ilst", b"".join(values))
# Find the old atoms.
with open(filename, "rb+") as fileobj:
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
self.__save(fileobj, atoms, data, padding)
def __save(self, fileobj, atoms, data, padding):
try:
path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
except KeyError:
self.__save_new(fileobj, atoms, data, padding)
else:
self.__save_existing(fileobj, atoms, path, data, padding)
def __save_new(self, fileobj, atoms, ilst_data, padding_func):
hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data
try:
path = atoms.path(b"moov", b"udta")
except KeyError:
path = atoms.path(b"moov")
offset = path[-1]._dataoffset
# ignoring some atom overhead... but we don't have padding left anyway
# and padding_size is guaranteed to be less than zero
content_size = get_size(fileobj) - offset
padding_size = -len(meta_data)
assert padding_size < 0
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
new_padding = min(0xFFFFFFFF, new_padding)
free = Atom.render(b"free", b"\x00" * new_padding)
meta = Atom.render(b"meta", meta_data + free)
if path[-1].name != b"udta":
# moov.udta not found -- create one
data = Atom.render(b"udta", meta)
else:
data = meta
insert_bytes(fileobj, len(data), offset)
fileobj.seek(offset)
fileobj.write(data)
self.__update_parents(fileobj, path, len(data))
self.__update_offsets(fileobj, atoms, len(data), offset)
def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func):
# Replace the old ilst atom.
ilst = path[-1]
offset = ilst.offset
length = ilst.length
# Use adjacent free atom if there is one
free = _find_padding(path)
if free is not None:
offset = min(offset, free.offset)
length += free.length
# Always add a padding atom to make things easier
padding_overhead = len(Atom.render(b"free", b""))
content_size = get_size(fileobj) - (offset + length)
padding_size = length - (len(ilst_data) + padding_overhead)
info = PaddingInfo(padding_size, content_size)
new_padding = info._get_padding(padding_func)
# Limit padding size so we can be sure the free atom overhead is as we
# calculated above (see Atom.render)
new_padding = min(0xFFFFFFFF, new_padding)
ilst_data += Atom.render(b"free", b"\x00" * new_padding)
resize_bytes(fileobj, length, len(ilst_data), offset)
delta = len(ilst_data) - length
fileobj.seek(offset)
fileobj.write(ilst_data)
self.__update_parents(fileobj, path[:-1], delta)
self.__update_offsets(fileobj, atoms, delta, offset)
def __update_parents(self, fileobj, path, delta):
"""Update all parent atoms with the new size."""
if delta == 0:
return
for atom in path:
fileobj.seek(atom.offset)
size = cdata.uint_be(fileobj.read(4))
if size == 1: # 64bit
# skip name (4B) and read size (8B)
size = cdata.ulonglong_be(fileobj.read(12)[4:])
fileobj.seek(atom.offset + 8)
fileobj.write(cdata.to_ulonglong_be(size + delta))
else: # 32bit
fileobj.seek(atom.offset)
fileobj.write(cdata.to_uint_be(size + delta))
def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
"""Update offset table in the specified atom."""
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 12)
data = fileobj.read(atom.length - 12)
fmt = fmt % cdata.uint_be(data[:4])
offsets = struct.unpack(fmt, data[4:])
offsets = [o + (0, delta)[offset < o] for o in offsets]
fileobj.seek(atom.offset + 16)
fileobj.write(struct.pack(fmt, *offsets))
def __update_tfhd(self, fileobj, atom, delta, offset):
if atom.offset > offset:
atom.offset += delta
fileobj.seek(atom.offset + 9)
data = fileobj.read(atom.length - 9)
flags = cdata.uint_be(b"\x00" + data[:3])
if flags & 1:
o = cdata.ulonglong_be(data[7:15])
if o > offset:
o += delta
fileobj.seek(atom.offset + 16)
fileobj.write(cdata.to_ulonglong_be(o))
def __update_offsets(self, fileobj, atoms, delta, offset):
"""Update offset tables in all 'stco' and 'co64' atoms."""
if delta == 0:
return
moov = atoms[b"moov"]
for atom in moov.findall(b'stco', True):
self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
for atom in moov.findall(b'co64', True):
self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
try:
for atom in atoms[b"moof"].findall(b'tfhd', True):
self.__update_tfhd(fileobj, atom, delta, offset)
except KeyError:
pass
def __parse_data(self, atom, data):
pos = 0
while pos < atom.length - 8:
head = data[pos:pos + 12]
if len(head) != 12:
raise MP4MetadataError("truncated atom % r" % atom.name)
length, name = struct.unpack(">I4s", head[:8])
version = ord(head[8:9])
flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
if name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (name, atom.name))
chunk = data[pos + 16:pos + length]
if len(chunk) != length - 16:
raise MP4MetadataError("truncated atom % r" % atom.name)
yield version, flags, chunk
pos += length
def __add(self, key, value, single=False):
assert isinstance(key, str)
if single:
self[key] = value
else:
self.setdefault(key, []).extend(value)
def __render_data(self, key, version, flags, value):
return Atom.render(_key2name(key), b"".join([
Atom.render(
b"data", struct.pack(">2I", version << 24 | flags, 0) + data)
for data in value]))
def __parse_freeform(self, atom, data):
length = cdata.uint_be(data[:4])
mean = data[12:length]
pos = length
length = cdata.uint_be(data[pos:pos + 4])
name = data[pos + 12:pos + length]
pos += length
value = []
while pos < atom.length - 8:
length, atom_name = struct.unpack(">I4s", data[pos:pos + 8])
if atom_name != b"data":
raise MP4MetadataError(
"unexpected atom %r inside %r" % (atom_name, atom.name))
version = ord(data[pos + 8:pos + 8 + 1])
flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0]
value.append(MP4FreeForm(data[pos + 16:pos + length],
dataformat=flags, version=version))
pos += length
key = _name2key(atom.name + b":" + mean + b":" + name)
self.__add(key, value)
def __render_freeform(self, key, value):
if isinstance(value, bytes):
value = [value]
dummy, mean, name = _key2name(key).split(b":", 2)
mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean
name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name
data = b""
for v in value:
flags = AtomDataType.UTF8
version = 0
if isinstance(v, MP4FreeForm):
flags = v.dataformat
version = v.version
data += struct.pack(
">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0)
data += v
return Atom.render(b"----", mean + name + data)
def __parse_pair(self, atom, data):
key = _name2key(atom.name)
values = [struct.unpack(">2H", d[2:6]) for
version, flags, d in self.__parse_data(atom, data)]
self.__add(key, values)
def __render_pair(self, key, value):
data = []
for v in value:
try:
track, total = v
except TypeError:
raise ValueError
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">4H", 0, track, total, 0))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __render_pair_no_trailing(self, key, value):
data = []
for (track, total) in value:
if 0 <= track < 1 << 16 and 0 <= total < 1 << 16:
data.append(struct.pack(">3H", 0, track, total))
else:
raise MP4MetadataValueError(
"invalid numeric pair %r" % ((track, total),))
return self.__render_data(key, 0, AtomDataType.IMPLICIT, data)
def __parse_genre(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0
if len(data) != 2:
raise MP4MetadataValueError("invalid genre")
genre = cdata.short_be(data)
# Translate to a freeform genre.
try:
genre = GENRES[genre - 1]
except IndexError:
# this will make us write it back at least
raise MP4MetadataValueError("unknown genre")
values.append(genre)
key = _name2key(b"\xa9gen")
self.__add(key, values)
def __parse_tempo(self, atom, data):
values = []
for version, flags, data in self.__parse_data(atom, data):
# version = 0, flags = 0 or 21
if len(data) != 2:
raise MP4MetadataValueError("invalid tempo")
values.append(cdata.ushort_be(data))
key = _name2key(atom.name)
self.__add(key, values)
def __render_tempo(self, key, value):
try:
if len(value) == 0:
return self.__render_data(key, 0, AtomDataType.INTEGER, b"")
if (min(value) < 0) or (max(value) >= 2 ** 16):
raise MP4MetadataValueError(
"invalid 16 bit integers: %r" % value)
except TypeError:
raise MP4MetadataValueError(
"tmpo must be a list of 16 bit integers")
values = [cdata.to_ushort_be(v) for v in value]
return self.__render_data(key, 0, AtomDataType.INTEGER, values)
def __parse_bool(self, atom, data):
for version, flags, data in self.__parse_data(atom, data):
if len(data) != 1:
raise MP4MetadataValueError("invalid bool")
value = bool(ord(data))
key = _name2key(atom.name)
self.__add(key, value, single=True)
def __render_bool(self, key, value):
return self.__render_data(
key, 0, AtomDataType.INTEGER, [chr_(bool(value))])
def __parse_cover(self, atom, data):
values = []
pos = 0
while pos < atom.length - 8:
length, name, imageformat = struct.unpack(">I4sI",
data[pos:pos + 12])
if name != b"data":
if name == b"name":
pos += length
continue
raise MP4MetadataError(
"unexpected atom %r inside 'covr'" % name)
if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG):
# Sometimes AtomDataType.IMPLICIT or simply wrong.
# In all cases it was jpeg, so default to it
imageformat = MP4Cover.FORMAT_JPEG
cover = MP4Cover(data[pos + 16:pos + length], imageformat)
values.append(cover)
pos += length
key = _name2key(atom.name)
self.__add(key, values)
def __render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + cover))
return Atom.render(_key2name(key), b"".join(atom_data))
def __parse_text(self, atom, data, implicit=True):
# implicit = False, for parsing unknown atoms only take utf8 ones.
# For known ones we can assume the implicit are utf8 too.
values = []
for version, flags, atom_data in self.__parse_data(atom, data):
if implicit:
if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8):
raise MP4MetadataError(
"Unknown atom type %r for %r" % (flags, atom.name))
else:
if flags != AtomDataType.UTF8:
raise MP4MetadataError(
"%r is not text, ignore" % atom.name)
try:
text = atom_data.decode("utf-8")
except UnicodeDecodeError as e:
raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e))
values.append(text)
key = _name2key(atom.name)
self.__add(key, values)
def __render_text(self, key, value, flags=AtomDataType.UTF8):
if isinstance(value, string_types):
value = [value]
encoded = []
for v in value:
if not isinstance(v, text_type):
if PY3:
raise TypeError("%r not str" % v)
try:
v = v.decode("utf-8")
except (AttributeError, UnicodeDecodeError) as e:
raise TypeError(e)
encoded.append(v.encode("utf-8"))
return self.__render_data(key, 0, flags, encoded)
def delete(self, filename):
"""Remove the metadata from the given filename."""
self._failed_atoms.clear()
self.clear()
self.save(filename, padding=lambda x: 0)
__atoms = {
b"----": (__parse_freeform, __render_freeform),
b"trkn": (__parse_pair, __render_pair),
b"disk": (__parse_pair, __render_pair_no_trailing),
b"gnre": (__parse_genre, None),
b"tmpo": (__parse_tempo, __render_tempo),
b"cpil": (__parse_bool, __render_bool),
b"pgap": (__parse_bool, __render_bool),
b"pcst": (__parse_bool, __render_bool),
b"covr": (__parse_cover, __render_cover),
b"purl": (__parse_text, __render_text),
b"egid": (__parse_text, __render_text),
}
# these allow implicit flags and parse as text
for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt",
b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp",
b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too",
b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco",
b"sosn", b"tvsh"]:
__atoms[name] = (__parse_text, __render_text)
def pprint(self):
def to_line(key, value):
assert isinstance(key, text_type)
if isinstance(value, text_type):
return u"%s=%s" % (key, value)
return u"%s=%r" % (key, value)
values = []
for key, value in sorted(iteritems(self)):
if not isinstance(key, text_type):
key = key.decode("latin-1")
if key == "covr":
values.append(u"%s=%s" % (key, u", ".join(
[u"[%d bytes of data]" % len(data) for data in value])))
elif isinstance(value, list):
for v in value:
values.append(to_line(key, v))
else:
values.append(to_line(key, value))
return u"\n".join(values)
class MP4Info(StreamInfo):
"""MPEG-4 stream information.
Attributes:
* bitrate -- bitrate in bits per second, as an int
* length -- file length in seconds, as a float
* channels -- number of audio channels
* sample_rate -- audio sampling rate in Hz
* bits_per_sample -- bits per sample
* codec (string):
* if starting with ``"mp4a"`` uses an mp4a audio codec
(see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``)
* for everything else see a list of possible values at
http://www.mp4ra.org/codecs.html
e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc.
* codec_description (string):
Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in
the future, use for display purposes only.
"""
bitrate = 0
channels = 0
sample_rate = 0
bits_per_sample = 0
codec = u""
codec_name = u""
def __init__(self, atoms, fileobj):
try:
moov = atoms[b"moov"]
except KeyError:
raise MP4StreamInfoError("not a MP4 file")
for trak in moov.findall(b"trak"):
hdlr = trak[b"mdia", b"hdlr"]
ok, data = hdlr.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
if data[8:12] == b"soun":
break
else:
raise MP4StreamInfoError("track has no audio data")
mdhd = trak[b"mdia", b"mdhd"]
ok, data = mdhd.read(fileobj)
if not ok:
raise MP4StreamInfoError("Not enough data")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version == 0:
offset = 8
fmt = ">2I"
elif version == 1:
offset = 16
fmt = ">IQ"
else:
raise MP4StreamInfoError("Unknown mdhd version %d" % version)
end = offset + struct.calcsize(fmt)
unit, length = struct.unpack(fmt, data[offset:end])
try:
self.length = float(length) / unit
except ZeroDivisionError:
self.length = 0
try:
atom = trak[b"mdia", b"minf", b"stbl", b"stsd"]
except KeyError:
pass
else:
self._parse_stsd(atom, fileobj)
def _parse_stsd(self, atom, fileobj):
"""Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError.
"""
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
# look at the first entry if there is one
entry_fileobj = cBytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description
def pprint(self):
return "MPEG-4 audio (%s), %.2f seconds, %d bps" % (
self.codec_description, self.length, self.bitrate)
class MP4(FileType):
"""An MPEG-4 audio file, probably containing AAC.
If more than one track is present in the file, the first is used.
Only audio ('soun') tracks will be read.
:ivar info: :class:`MP4Info`
:ivar tags: :class:`MP4Tags`
"""
MP4Tags = MP4Tags
_mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"]
def load(self, filename):
self.filename = filename
with open(filename, "rb") as fileobj:
try:
atoms = Atoms(fileobj)
except AtomError as err:
reraise(error, err, sys.exc_info()[2])
try:
self.info = MP4Info(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4StreamInfoError, err, sys.exc_info()[2])
if not MP4Tags._can_load(atoms):
self.tags = None
self._padding = 0
else:
try:
self.tags = self.MP4Tags(atoms, fileobj)
except error:
raise
except Exception as err:
reraise(MP4MetadataError, err, sys.exc_info()[2])
else:
self._padding = self.tags._padding
def save(self, filename=None, padding=None):
super(MP4, self).save(filename, padding=padding)
def delete(self, filename=None):
super(MP4, self).delete(filename)
def add_tags(self):
if self.tags is None:
self.tags = self.MP4Tags()
else:
raise error("an MP4 tag already exists")
@staticmethod
def score(filename, fileobj, header_data):
return (b"ftyp" in header_data) + (b"mp4" in header_data)
Open = MP4
def delete(filename):
"""Remove tags from a file."""
MP4(filename).delete()
| gpl-3.0 | -4,780,732,970,170,729,000 | 32.038123 | 79 | 0.550831 | false | 3.760766 | false | false | false |
bitmovin/bitmovin-python | bitmovin/resources/models/encodings/muxings/mp4_muxing.py | 1 | 4217 | from bitmovin.errors import InvalidTypeError
from bitmovin.resources.models.encodings.muxings.time_code import TimeCode
from bitmovin.resources.enums.mp4_muxing_manifest_type import MP4MuxingManifestType
from .muxing import Muxing
class MP4Muxing(Muxing):
def __init__(self, streams, filename=None, outputs=None, id_=None, custom_data=None, name=None, description=None,
ignored_by=None, fragment_duration=None, time_code=None, fragmented_mp4_muxing_manifest_type=None,
stream_conditions_mode=None, internal_chunk_length=None):
super().__init__(id_=id_, custom_data=custom_data, streams=streams, outputs=outputs,
name=name, description=description, ignored_by=ignored_by,
stream_conditions_mode=stream_conditions_mode, internal_chunk_length=internal_chunk_length)
self.filename = filename
self.fragmentDuration = fragment_duration
self._timeCode = None
self.timeCode = time_code
self._fragmentedMP4MuxingManifestType = None
self.fragmentedMP4MuxingManifestType = fragmented_mp4_muxing_manifest_type
@property
def timeCode(self):
return self._timeCode
@timeCode.setter
def timeCode(self, new_time_code):
if new_time_code is None:
self._timeCode = None
return
if isinstance(new_time_code, TimeCode):
self._timeCode = new_time_code
else:
raise InvalidTypeError(
'Invalid type {} for timeCode: must be TimeCode object!'.format(
type(new_time_code)
))
@property
def fragmentedMP4MuxingManifestType(self):
return self._fragmentedMP4MuxingManifestType
@fragmentedMP4MuxingManifestType.setter
def fragmentedMP4MuxingManifestType(self, new_fragmented_mp4_muxing_manifest_type):
if new_fragmented_mp4_muxing_manifest_type is None:
self._fragmentedMP4MuxingManifestType = None
elif isinstance(new_fragmented_mp4_muxing_manifest_type, MP4MuxingManifestType):
self._fragmentedMP4MuxingManifestType = new_fragmented_mp4_muxing_manifest_type.value
elif isinstance(new_fragmented_mp4_muxing_manifest_type, str):
self._fragmentedMP4MuxingManifestType = new_fragmented_mp4_muxing_manifest_type
else:
raise InvalidTypeError('fragmentedMP4MuxingManifestType has to be of type MP4MuxingManifestType or str')
@classmethod
def parse_from_json_object(cls, json_object):
muxing = super().parse_from_json_object(json_object=json_object)
filename = json_object['filename']
fragment_duration = json_object.get('fragmentDuration')
time_code_json = json_object.get('timeCode')
time_code = None
if time_code_json is not None:
time_code = TimeCode.parse_from_json_object(time_code_json)
fragmented_mp4_muxing_manifest_type = json_object.get('fragmentedMP4MuxingManifestType')
mp4_muxing = MP4Muxing(filename=filename,
fragment_duration=fragment_duration,
time_code=time_code,
fragmented_mp4_muxing_manifest_type=fragmented_mp4_muxing_manifest_type,
id_=muxing.id,
streams=muxing.streams,
outputs=muxing.outputs,
custom_data=muxing.customData,
name=muxing.name,
description=muxing.description,
ignored_by=muxing.ignored_by,
stream_conditions_mode=muxing.stream_conditions_mode,
internal_chunk_length=muxing.internal_chunk_length)
return mp4_muxing
def serialize(self):
serialized = super().serialize()
if self.timeCode is not None:
serialized['timeCode'] = self.timeCode.serialize()
serialized['fragmentedMP4MuxingManifestType'] = self.fragmentedMP4MuxingManifestType
return serialized
| unlicense | -3,620,123,299,172,139,500 | 44.836957 | 117 | 0.630069 | false | 4.066538 | false | false | false |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/zabbix_openstack/filters.py | 1 | 2541 | from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import serializers
from waldur_core.core import serializers as core_serializers
from waldur_core.structure import models as structure_models
from waldur_zabbix.apps import ZabbixConfig
class LinkFilterBackend(DjangoFilterBackend):
"""
    This filter allows filtering Zabbix service project links by the URL of a virtual machine.
    Consider, for example, the following use case.
    There are two OpenStack virtual machines in the Waldur project.
    A Zabbix server is installed on the first VM.
    A Zabbix agent is to be installed on the second VM.
    Note that both of them share the same OpenStack tenant.
    Therefore, they should be able to communicate directly, i.e. without a proxy or virtual router.
    There is a service settings object for the Zabbix provider in the Waldur database.
    It is configured with its scope field equal to the Zabbix server VM.
    Also, there are a Zabbix service and a Zabbix service project link configured for the project.
    By supplying the URL of the OpenStack virtual machine to this filter backend,
    we should be able to get the list of all Zabbix service project links
    which could be used for Zabbix monitoring in the same OpenStack tenant.
"""
def filter_queryset(self, request, queryset, view):
resource_url = request.query_params.get('resource')
if resource_url:
try:
resource = self.get_resource_by_url(request, resource_url)
except serializers.ValidationError:
return queryset.none()
link = resource.service_project_link
siblings = resource._meta.model.objects.filter(
service_project_link=link
).exclude(uuid=resource.uuid)
if siblings.count() == 0:
return queryset.none()
service_settings = structure_models.ServiceSettings.objects.filter(
type=ZabbixConfig.service_name, scope__in=siblings,
)
queryset = queryset.filter(
project=link.project, service__settings__in=service_settings
)
return queryset
def get_resource_by_url(self, request, resource_url):
related_models = structure_models.VirtualMachine.get_all_models()
field = core_serializers.GenericRelatedField(related_models=related_models)
# Trick to set field context without serializer
field._context = {'request': request}
return field.to_internal_value(resource_url)
| mit | 5,400,995,361,679,140,000 | 43.578947 | 95 | 0.695789 | false | 4.636861 | false | false | false |
iilxy/ijandan | Freeze_py2exe.py | 1 | 2656 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Setup script for distributing SIFT as a stand-alone executable
from distutils.core import setup
import glob
import py2exe
import sys
import pkg_resources
# If run without args, build executables (the quiet-mode "-q" flag below is left commented out).
if len(sys.argv) == 1:
sys.argv.append("py2exe")
#sys.argv.append("-q")
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
# for the versioninfo resources
self.version = "0.1"
self.company_name = u"昕睿软件"
self.copyright = u"昕睿软件"
self.name = u"SexyGirl"
manifest = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="0.64.1.0"
processorArchitecture="x86"
name="Controls"
type="win32"
/>
<description>myProgram</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
RT_MANIFEST = 24
MyApp = Target(
# used for the versioninfo resource
description = u"SexyGirl Downloader",
# what to build
script = "spynner_test.py",
#other_resources = [(24,1,manifest)],
icon_resources = [(1, "lsc.ico")],
dest_base = "dist")
py2exe_options = {
"includes": ["sqlite3","sip"], #PyQt程序打包时需要
"dll_excludes": ["w9xpopen.exe",],
"excludes" : ["Tkconstants","Tkinter","tcl","doctest","pdb","unittest"],
"compressed": 1, #压缩文件
"optimize": 2, #优化级别,默认为0
#"ascii": 0, #ascii指不自动包含encodings和codecs
"bundle_files": 3, #指将程序打包成单文件(此时除了exe文件外,还会生成一个zip文件。如果不需要zip文件,还需要设置zipfile = None)。1表示pyd和dll文件会被打包到单文件中,且不能从文件系统中加载python模块;值为2表示pyd和dll文件会被打包到单文件中,但是可以从文件系统中加载python模块
}
data_files=[("",
["lsc.ico","msvcr90.dll"])]
setup(
name = u'dist',
version = '1.0',
#windows = [MyApp],
console = [MyApp],
#zipfile = None,
options = {'py2exe': py2exe_options},
data_files = data_files,
) | apache-2.0 | -8,958,125,155,379,249,000 | 26.047059 | 180 | 0.588581 | false | 2.78271 | false | false | false |
WinawerLab/MRI_tools | preprocessing/to_freesurfer.py | 1 | 6411 | #! /usr/bin/env python
####################################################################################################
# Script to reorient volumes into anatomical orientation and to create surface time-series.
# This script is made for use with a registration file (in FreeSurfer's tkreg format) and
# a series of unwarped time-series volumes: i.e., the output of Serra's preprocessing
# script.
# Author: Noah C. Benson <[email protected]>
import argparse, sys, os, six
import neuropythy as ny, numpy as np, nibabel as nib
if six.PY3:
from functools import reduce
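# Example invocation (file names are illustrative):
#   python to_freesurfer.py distort2anat_tkreg.dat run01.nii.gz run02.nii.gz \
#       --surf --layer midgray --out preproc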
def main(args):
# Parse the arguments...
parser = argparse.ArgumentParser()
parser.add_argument('reg', metavar='registration_file', nargs=1,
help=('The distort2anat_tkreg.dat or similar file: the registration'
' file, in FreeSurfer\'s tkreg format, to apply to the EPIs.'))
parser.add_argument('epis', metavar='EPI', type=str, nargs='+',
help='The EPI files to be converted to anatomical orientation')
parser.add_argument('-t', '--tag', required=False, default='-', dest='tag', nargs=1,
help=('A tag to append to the output filenames; if given as - or'
' omitted, overwrites original files.'))
parser.add_argument('-s', '--surf', required=False, default=False,
dest='surface', action='store_true',
help=('If provided, instructs the script to also produce files of the '
'time-series resampled on the cortical surface.'))
parser.add_argument('-o', '--out', required=False, default='.', dest='outdir',
help=('The output directory to which the files should be written; by'
' default this is the current directory (.); note that if this'
' directory also contains the EPI files and there is no tag given,'
' then the EPIs will be overwritten.'))
parser.add_argument('-m', '--method', required=False, default='linear', dest='method',
help=('The method to use for volume-to-surface interpolation; this may'
' be nearest or linear; the default is linear.'))
parser.add_argument('-l', '--layer', required=False, default='midgray', dest='layer',
help=('Specifies the cortical layer to user in interpolation from volume'
' to surface. By default, uses midgray. May be set to a value'
' between 0 (white) and 1 (pial) to specify an intermediate surface'
' or may be simply white, pial, or midgray.'))
parser.add_argument('-d', '--subjects-dir', required=False, default=None, dest='sdir',
help=('Specifies the subjects directory to use; by default uses the'
' environment variable SUBJECTS_DIR.'))
parser.add_argument('-v', '--verbose', required=False, default=False, action='store_true',
dest='verbose', help='Print verbose output')
if args[0].startswith('python'): args = args[2:]
else: args = args[1:]
args = parser.parse_args(args)
# Check some of the arguments...
epis = args.epis
if len(epis) < 1: raise RuntimeError('No EPIs given')
tag = args.tag[0]
if tag == '-': tag = ''
dosurf = args.surface
outdir = args.outdir
if not os.path.isdir(outdir):
raise RuntimeError('Directory %s does not exist' % outdir)
if args.verbose:
def note(*args):
six.print_(*args, flush=True)
return True
else:
def note(*args):
return False
try: args.layer = float(args.layer)
except: pass
# Read in the registration file
args.reg = args.reg[0]
if not os.path.isfile(args.reg):
raise RuntimeError('Given registration file not found: %s' % args.reg)
with open(args.reg, 'r') as f:
lines = []
while True:
s = f.readline()
if s is None or s == '': break
lines.append(s)
    # The first line of a tkreg registration file is the FreeSurfer subject ID
sub = lines[0].strip()
if args.sdir is not None:
ny.add_subject_path(args.sdir)
try: sub = ny.freesurfer_subject(sub)
except: raise ValueError('No subject %s; you may need to set your SUBJECTS_DIR' % sub)
affine = np.asarray([[float(ss) for ss in s.split()] for s in lines[4:8]])
affinv = np.linalg.inv(affine)
displm = sub.lh.affine
# loop over the given EPIs
for epi in epis:
note('Processing EPI %s...' % epi)
# import the epi file..
img = ny.load(epi, to='image')
# edit the header...
note(' - Correcting volume orientation...')
new_affine = np.dot(displm, np.dot(affinv, ny.freesurfer.tkr_vox2ras(img)))
newimg = nib.Nifti1Image(img.dataobj, new_affine, img.header)
(epi_dir,epi_flnm) = os.path.split(epi)
        if epi_flnm[-4:] in ['.mgz', '.mgh', '.nii']:
pre = epi_flnm[:-4]
suf = epi_flnm[-4:]
else:
pre = epi_flnm[:-7]
suf = epi_flnm[-7:]
srf_flnm = pre + tag + '.mgz'
epi_flnm = pre + tag + suf
newimg.to_filename(os.path.join(args.outdir, epi_flnm))
# okay, now project to the surface
if args.surface:
note(' - Projecting to surface...')
(ldat, rdat) = sub.image_to_cortex(newimg, surface=args.layer,
method=args.method, dtype=np.float32)
# we need to fix the dimensions...
for (d,h) in zip([ldat,rdat], ['lh','rh']):
if d.shape[-1] == 1:
# then this should properly be a 3d MGH image, not a 4d one.
im = nib.freesurfer.mghformat.MGHImage(
np.transpose(reduce(np.expand_dims, [-1], d), (0,2,1)),
np.eye(4))
else:
im = nib.freesurfer.mghformat.MGHImage(
np.transpose(reduce(np.expand_dims, [-1,-1], d), (0,2,3,1)),
np.eye(4))
im.to_filename(os.path.join(args.outdir, h + '.' + srf_flnm))
# That's it!
return 0
main(sys.argv)
| gpl-3.0 | -6,402,878,796,139,789,000 | 49.480315 | 100 | 0.546873 | false | 3.845831 | false | false | false |
hakkeroid/lcconcept | tests/sources/test_etcdstore.py | 1 | 2919 | # -*- coding: utf-8 -*-
import pytest
from layeredconfig import EtcdStore
from layeredconfig.sources.etcdstore import EtcdConnector
try:
import requests
except:
    # skip all tests when requests is not installed
pytestmark = pytest.mark.skip(reason='Missing optional dependencies')
@pytest.fixture
def connector():
class Connector:
"""Simple etcd connector"""
def __init__(self):
self.get_data = {}
self.set_data = {}
@pytest.helpers.inspector
def get(self, *args, **kwargs):
return self.get_data
@pytest.helpers.inspector
def set(self, *items):
self.set_data.update(items)
connector = Connector()
connector.get_data = {
'node': {
'nodes': [{
'key': 'a',
'value': '1'
}, {
'key': 'b',
'dir': True,
'nodes': [{
'key': 'c',
'value': '2'
}, {
'key': 'd',
'dir': True,
'nodes': [{
'key': 'e',
'value': '3'
}]
}]
}]
}
}
return connector
@pytest.mark.parametrize('key', ['/', '/a'])
def test_etcd_connector_get_data(monkeypatch, key):
url = 'http://fake-url:2379'
connector = EtcdConnector(url)
class Response(object):
def json(self):
return {}
def get(*args, **kwargs):
assert url + '/keys' + key == args[0]
assert 'recursive' in kwargs['params']
return Response()
monkeypatch.setattr('layeredconfig.sources.etcdstore.requests.get', get)
connector.get(key)
@pytest.mark.parametrize('key, value', [
('/a', 1),
('/b', 2),
])
def test_etcd_connector_set_data(monkeypatch, key, value):
url = 'http://fake-url:2379'
connector = EtcdConnector(url)
def put(*args, **kwargs):
assert url + '/keys' + key == args[0]
assert value == kwargs['data']['value']
monkeypatch.setattr('layeredconfig.sources.etcdstore.requests.put', put)
connector.set((key, value))
def test_lazy_read_etcd_source(connector):
config = EtcdStore('bogus-url')
config._connector = connector
# etcd is untyped
assert config.a == '1'
assert config.b.c == '2'
assert config.b.d == {'e': '3'}
assert config._connector.get.calls == 1
config._use_cache = False
config.a
assert config._connector.get.calls == 2
def test_write_etcd_source(connector):
config = EtcdStore('bogus-url')
config._connector = connector
config.a = '10'
config.b.c = '20'
config.b.d.e = '30'
config.write_cache()
data = connector.set_data
assert data['/a'] == '10'
assert data['/b/c'] == '20'
assert data['/b/d/e'] == '30'
| bsd-3-clause | -8,357,687,103,487,166,000 | 23.325 | 76 | 0.523124 | false | 3.723214 | true | false | false |
XiaJieCom/change | stu103151/days24/stark/arya/needle/modules/base_module.py | 1 | 1074 | #!/usr/bin/env python3
from core.utils import MsgPrint
class BaseSaltModule(object):
def __init__(self,task_obj):
self.task_obj = task_obj
def process(self,module_data,*args,**kwargs):
section_name = module_data['raw_cmds']['section']
section_data = module_data['raw_cmds']['mod_data']
sub_action = module_data['raw_cmds'].get('sub_action')
for mod_item in section_data:
for k,v in mod_item.items():
state_func = getattr(self,'func__%s'%k)
state_func(v)
if sub_action:
            # if a sub action was specified, run it; this basically only applies to files and modules
sub_action_func = getattr(self,'func__%s' % sub_action)
sub_action_func(module_data=module_data['raw_cmds'])
def func__require(self,*args,**kwargs):
print('require:',*args,**kwargs)
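    # Sketch of a concrete module (illustrative only): handlers are looked up by
    # name as 'func__<keyword>', so a hypothetical file module could define e.g.
    #   class FileModule(BaseSaltModule):
    #       def func__path(self, value, *args, **kwargs):
    #           print('path:', value)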
def type_validate(self,item_name,data,data_type):
if type(data) is not data_type:
MsgPrint.error("[%s] requires %s, not a %s" %(item_name,data_type,type(data)))
| lgpl-2.1 | -355,915,979,643,375,700 | 26.368421 | 90 | 0.571154 | false | 3.219814 | false | false | false |
openwisp/nodeshot | nodeshot/ui/default/tests/django.py | 8 | 2526 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.test import TestCase
from nodeshot.core.base.tests import user_fixtures
from nodeshot.ui.default import settings as local_settings
class DefaultUiDjangoTest(TestCase):
fixtures = [
'initial_data.json',
user_fixtures,
'test_layers.json',
'test_status.json',
'test_nodes.json',
'test_images.json',
]
def test_index(self):
response = self.client.get(reverse('ui:index'))
self.assertEqual(response.status_code, 200)
def test_social_auth_optional(self):
# enable social auth
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
response = self.client.get(reverse('ui:index'))
self.assertContains(response, 'social-buttons')
# disable social auth
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)
response = self.client.get(reverse('ui:index'))
self.assertNotContains(response, 'social-buttons')
def test_facebook_optional(self):
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
setattr(local_settings, 'FACEBOOK_ENABLED', True)
response = self.client.get(reverse('ui:index'))
self.assertContains(response, 'btn-facebook')
setattr(local_settings, 'FACEBOOK_ENABLED', False)
response = self.client.get(reverse('ui:index'))
self.assertNotContains(response, 'btn-facebook')
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)
def test_google_optional(self):
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
setattr(local_settings, 'GOOGLE_ENABLED', True)
response = self.client.get(reverse('ui:index'))
self.assertContains(response, 'btn-google')
setattr(local_settings, 'GOOGLE_ENABLED', False)
response = self.client.get(reverse('ui:index'))
self.assertNotContains(response, 'btn-google')
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)
def test_github_optional(self):
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
setattr(local_settings, 'GITHUB_ENABLED', True)
response = self.client.get(reverse('ui:index'))
self.assertContains(response, 'btn-github')
setattr(local_settings, 'GITHUB_ENABLED', False)
response = self.client.get(reverse('ui:index'))
self.assertNotContains(response, 'btn-github')
setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)
| gpl-3.0 | 4,067,982,266,221,205,000 | 39.741935 | 61 | 0.666271 | false | 3.92236 | true | false | false |
deisi/SFG2D | sfg2d/utils/detect_peaks.py | 1 | 8692 | """Detect peaks in data based on their amplitude and other features."""
from __future__ import division, print_function
import numpy as np
__author__ = "Marcos Duarte, https://github.com/demotu/BMC, Modified by Malte Deiseroth"
__version__ = "1.0.4.1"
__license__ = "MIT"
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
kpsh=False, valley=False, show=False, ax=None):
"""Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
>>> from detect_peaks import detect_peaks
>>> x = np.random.randn(100)
>>> x[60:81] = np.nan
>>> # detect all peaks and plot data
>>> ind = detect_peaks(x, show=True)
>>> print(ind)
>>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
>>> # set minimum peak height = 0 and minimum peak distance = 20
>>> detect_peaks(x, mph=0, mpd=20, show=True)
>>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
>>> # set minimum peak distance = 2
>>> detect_peaks(x, mpd=2, show=True)
>>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
>>> # detection of valleys instead of peaks
>>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)
>>> x = [0, 1, 1, 0, 1, 1, 0]
>>> # detect both edges
>>> detect_peaks(x, edge='both', show=True)
>>> x = [-2, 1, -2, 2, 1, 1, 3, 0]
>>> # set threshold = 2
>>> detect_peaks(x, threshold = 2, show=True)
"""
x = np.atleast_1d(x).astype('float64')
if x.size < 3:
return np.array([], dtype=int)
if valley:
x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
if indnan.size:
x[indnan] = np.inf
dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
if not edge:
ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
else:
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['falling', 'both']:
ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# handle NaN's
if ind.size and indnan.size:
# NaN's and values close to NaN's cannot be peaks
ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
# first and last values of x cannot be peaks
if ind.size and ind[0] == 0:
ind = ind[1:]
if ind.size and ind[-1] == x.size-1:
ind = ind[:-1]
# remove peaks < minimum peak height
if ind.size and mph is not None:
ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
if ind.size and threshold > 0:
dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
ind = np.delete(ind, np.where(dx < threshold)[0])
# detect small peaks closer than minimum peak distance
if ind.size and mpd > 1:
ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
idel = np.zeros(ind.size, dtype=bool)
for i in range(ind.size):
if not idel[i]:
# keep peaks with the same height if kpsh is True
idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
& (x[ind[i]] > x[ind] if kpsh else True)
idel[i] = 0 # Keep current peak
# remove the small peaks and sort back the indices by their occurrence
ind = np.sort(ind[~idel])
if show:
if indnan.size:
x[indnan] = np.nan
if valley:
x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
def detect_peaks_vectorized(x, **kwargs):
"""
vectorized version of detect_peaks.
--------
input:
x 1d or 2d array like to search for peaks.
          if 2d, peaks are searched in a row-wise pattern,
thus we loop over x.
        for the rest see detect_peaks, with the addition that all
        kwargs are also vectorized: instead of one mph one can also
        enter a list of mphs where each mph corresponds to one row of the
        2d spectrum in x.
--------
return
        The return mimics the behaviour of numpy masks. See e.g.
        `np.where(np.array([1, 2, 3, 4, 5]) > 3)`
        In the case of a 1d x-array, a numpy-mask-like tuple of peak positions
        is returned.
        In the case of a 2d x-array, a numpy-mask-like tuple is returned. The
        0th element of the tuple is an array with the index of the spectrum
        each peak was found in, the 1st element is the position of the peak
        within that spectrum.
"""
# check and vectorize user input
D1, D2 = {}, {} # 1D and 2D kwarg input
for key, value in kwargs.items():
# print(key, value)
if np.shape(value) == () : D1[key] = value
elif np.shape(value) == (len(x),): D2[key] = value
else: raise IOError("Cant handle dim of %s. Int, float or iterable of shape %s was expected, but %s was found."%(key, len(x), np.shape(value)))
# pass everything to detect_peak in the 1d case
if len(np.shape(x)) == 1:
peakpos = detect_peaks(x, **kwargs)
return (peakpos) # format according to numpy masks
# find peaks an format output according to numpy mask conventions
peakpos = np.array([])
pos = np.array([])
for i in range(len(x)):
elm = x[i]
kwrds = dict(D1)
for key, value in D2.items():
kwrds[key] = value[i]
peaks = detect_peaks(elm, **kwrds )
peakpos = np.hstack((peakpos, peaks))
pos = np.hstack((pos, i * np.ones(np.shape(peaks), dtype=np.int32)))
return np.int32(pos), np.int32(peakpos)
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
"""Plot results of the detect_peaks function, see its help."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
else:
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if ind.size:
label = 'valley' if valley else 'peak'
label = label + 's' if ind.size > 1 else label
ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (ind.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
mode = 'Valley detection' if valley else 'Peak detection'
ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
% (mode, str(mph), mpd, str(threshold), edge))
# plt.grid()
plt.show()
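# A minimal usage sketch for detect_peaks_vectorized (made-up data): one `mph`
# per row is allowed because keyword arguments are vectorized row-wise.
if __name__ == '__main__':
    _t = np.linspace(0, 1, 200)
    _spectra = np.vstack([np.sin(2 * np.pi * 5 * _t), np.cos(2 * np.pi * 3 * _t)])
    _rows, _positions = detect_peaks_vectorized(_spectra, mph=[0.5, 0.5], mpd=10)
    # _rows[i] is the spectrum index, _positions[i] the peak position within that row
    print(list(zip(_rows, _positions)))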
| mit | -2,232,896,576,626,879,700 | 37.122807 | 151 | 0.573746 | false | 3.268898 | false | false | false |
andr-04/Telethon | telethon_examples/print_updates.py | 1 | 1612 | #!/usr/bin/env python3
# A simple script to print all updates received
from getpass import getpass
from os import environ
# environ is used to get API information from environment variables
# You could also use a config file, pass them as arguments,
# or even hardcode them (not recommended)
from telethon import TelegramClient
from telethon.errors import SessionPasswordNeededError
def main():
session_name = environ.get('TG_SESSION', 'session')
user_phone = environ['TG_PHONE']
client = TelegramClient(session_name,
int(environ['TG_API_ID']),
environ['TG_API_HASH'],
proxy=None,
update_workers=4)
print('INFO: Connecting to Telegram Servers...', end='', flush=True)
client.connect()
print('Done!')
if not client.is_user_authorized():
print('INFO: Unauthorized user')
client.send_code_request(user_phone)
code_ok = False
while not code_ok:
code = input('Enter the auth code: ')
try:
code_ok = client.sign_in(user_phone, code)
except SessionPasswordNeededError:
password = getpass('Two step verification enabled. Please enter your password: ')
code_ok = client.sign_in(password=password)
    print('INFO: Client initialized successfully!')
client.add_update_handler(update_handler)
input('Press Enter to stop this!\n')
def update_handler(update):
print(update)
print('Press Enter to stop this!')
if __name__ == '__main__':
main()
| mit | 7,799,248,620,090,208,000 | 34.043478 | 97 | 0.621588 | false | 4.275862 | false | false | false |
colmenero/Misc | MailAnalysis/script.py | 1 | 8755 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from os import listdir, path
import re
import copy
import datetime
import matplotlib.pyplot as plt
def get_mails(*folders):
""" Gets the filename of every mail to analyze
:param folders: Folders to analyze (comma separated arguments)
:return mails: List of all files in given folders
:type folders: Tuple of strings
:rtype: list
"""
mails = []
for folder in folders:
temp = [path.join(folder, file) for file in listdir(folder)
if path.isfile(path.join(folder, file))]
mails = mails + temp
return mails
def get_data(mails, headers):
""" Gets data specified in headers from the mails
:param mails: List of mail filenames. Output of get_mails
:params headers: List of mail headers to get info from
:return data: Dictionary of the data associated with the headers
Each index is named as the corresponding header. The value is
a list of lines, each one belonging to one mail.
:return cnt_skipped: Counter of skipped files due to Unicode decoding errors
:type mails: List
:type headers: List
:rtype data: Dictionary
:rtype cnt_skipped: int
"""
# Init empty dictionary
data = {}
for i in headers:
data[i] = []
cnt_skipped = 0
for file in mails:
with open(file, 'r') as f:
# If text files are not unicode, there will be an error.
# However detecting formats is not trivial, right now we
# are just ignoring files producing errors.
try:
lines = f.readlines()
except UnicodeDecodeError:
cnt_skipped = cnt_skipped + 1
continue
for line in lines:
for header in headers:
if line[0:len(header)] == header:
# TODO: +1line? (next one starts \t)
data[header].append(line[len(header):-1])
return (data, cnt_skipped)
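# For instance (illustrative values), with headers ['From: ', 'Date: '] the result
# is shaped like {'From: ': ['Jane Doe <[email protected]>', ...],
#                 'Date: ': ['Thu, 04 Sep 2014 14:40:18 +0200', ...]},
# together with the number of files that could not be decoded and were skipped.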
def regex_mails(from_to):
""" Gets the email address (only) from the from/to field, removing
everything else.
E.g: Super Lopez Jimenez <[email protected]> --> [email protected]
:param from_to: Field from/to from email header.
    :return email_add: The bare email address; an empty list if no match was found
:type from_to: string
:rtype email_add: string
Reference:
http://stackoverflow.com/questions/4026685/regex-to-get-text-between-two-characters
"""
regex_str = "(?<=<)[^}]*(?=>)"
temp = re.search(regex_str, from_to)
if not temp:
email_add = []
else:
email_add = temp.group(0)
return email_add
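# For instance (illustrative values):
#   regex_mails('Super Lopez <[email protected]>')  ->  '[email protected]'
#   regex_mails('[email protected]')                 ->  []  (no angle brackets, no match)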
def date_format(date):
""" Changes date format to python datetime object.
Input format: Thu, [0]4 Sep 2014 14:40:18 +0200 [(CET)]
Depends of mail
:param date: Input date
:return date_out: List of all files in given folders
:type date: string
:rtype date_out: date object
Reference:
http://stackoverflow.com/questions/4666973/how-to-extract-a-substring-from-inside-a-string-in-python
"""
regex_str = ", (.+?):"
temp = re.search(regex_str, date)
if temp:
temp = temp.group(0)
# Less hardcode?
month_table = {
"Jan": 1,
"Feb": 2,
"Mar": 3,
"Apr": 4,
"May": 5,
"Jun": 6,
"Jul": 7,
"Aug": 8,
"Sep": 9,
"Oct": 10,
"Nov": 11,
"Dec": 12,
}
year = int(temp[-8:-4])
month = month_table[temp[-12:-9]]
day = int(temp[-15:-13])
hour = int(date[len(temp):len(temp)+2])
minute = int(date[len(temp)+3:len(temp)+5])
second = int(date[len(temp)+6:len(temp)+8])
try:
offset = int(date[len(temp)+9:len(temp)+12])
except:
# Some mails do not have UTC offset in numbers, just as string
# We will ignore this, though it wouldn't be difficult to map
# strings to number using a dictionary (TODO)
offset = 0
hour = (hour + offset) % 24
date_out = datetime.datetime(year, month, day, hour, minute, second)
else:
date_out = []
return date_out
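# For instance (illustrative value):
#   date_format('Thu, 04 Sep 2014 14:40:18 +0200')  ->  datetime.datetime(2014, 9, 4, 16, 40, 18)
# The numeric UTC offset is folded into the hour; unparseable dates return [].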
def data_munge(data_raw):
""" Changes data format
    The from/to email lists are replaced with a dictionary holding one entry per
    unique email address (plus a counter), which also removes aliases.
    The Date header is converted into a Python datetime object.
:param data_raw: Output of get_data
    :return data: Dictionary with munged data
:type data_raw: dictionary
:rtype data: dictionary
"""
# We can make it faster using a shallow copy if we are not interested in
# keeping data_raw.
data = copy.deepcopy(data_raw)
# Changing email from/to fields
for header in ['From: ', 'To: ']:
if header in data_raw.keys():
for ind in range(len(data[header])):
# TODO: Handling multiple receivers
data[header][ind] = regex_mails(data[header][ind])
temp = {}
for addr in data[header]:
if addr: # not empty
if addr in temp:
temp[addr] = temp[addr] + 1
else:
temp[addr] = 1
data[header] = temp
if 'Date: ' in data_raw.keys():
for ind in range(len(data['Date: '])):
data['Date: '][ind] = date_format(data['Date: '][ind])
return data
def hour_mails(data):
""" Calculate percentage of mails sent on each hour of the day
:param data: Output of data_munge
:return cnt: List of counters. First element is at 1am, second at 2am...
:type data: dictionary
rtype cnt: list
"""
cnt = [0]*24
cnt_total = 0
if 'Date: ' in data.keys():
for date in data['Date: ']:
if date: # not empty
cnt[date.hour] = cnt[date.hour] + 1
cnt_total = cnt_total + 1
cnt = [100*x/cnt_total for x in cnt]
return cnt
def emails_involved(data):
""" Prints to a file emails involved and a counter for each one
:param data: Output of data_munge
:type data: dictionary
Reference:
http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
"""
filename_out = 'resultados.txt'
with open(filename_out, 'w') as f:
if 'To: ' in data.keys():
f.write('******************************************************\n')
f.write(' To \n')
f.write(' E-mail + Counter\n')
f.write('------------------------------------------------------\n')
for mail, cnt in \
sorted(data['To: '].items(),
key=lambda x: x[1], reverse=True):
f.write(mail + '\t' + str(cnt) + '\n')
f.write('******************************************************\n')
if 'From: ' in data.keys():
f.write('******************************************************\n')
f.write(' From \n')
f.write(' E-mail + Counter\n')
f.write('------------------------------------------------------\n')
for mail, cnt in \
sorted(data['From: '].items(),
key=lambda x: x[1], reverse=True):
f.write(mail + '\t' + str(cnt) + '\n')
f.write('******************************************************\n')
if __name__ == '__main__':
debug = False
mails = get_mails('./Inbox', './Sent')
headers = ['Date: ', 'From: ', 'To: ', 'Subject: ']
(data_raw, cnt_skipped) = get_data(mails, headers)
data = data_munge(data_raw)
hour_cnt = hour_mails(data)
# Output
emails_involved(data)
fig1 = plt.figure()
axis1 = fig1.add_subplot(111)
axis1.set_title('Percentage of mails sent each hour of the day')
axis1.set_xlabel('Hour')
axis1.set_ylabel('%')
x_labels = list(range(1, 25))
axis1.bar(x_labels, hour_cnt, align='center')
axis1.set_xticks(x_labels)
axis1.set_xlim(1, 24)
plt.show()
# TODO: analyse content, limit data to time frame...
if debug is True:
print('{} skipped files due to unicode decoding error'
.format(cnt_skipped))
# Mails saved using offlineimap and a google account.
# Folders needed: inbox and Sent.
# Notes:
# In Python, variables defined in main are global, but I prefer to declare
# arguments
# References:
#
| gpl-3.0 | -8,130,193,597,380,591,000 | 29.719298 | 108 | 0.525985 | false | 3.884206 | false | false | false |
aldanor/sidecar | src/sidecar/tags.py | 1 | 6977 | # -*- coding: utf-8 -*-
import collections
import keyword
import numbers
import re
import six
from sidecar.element import Element, expr
_TAGS = """
a abbr address area article aside audio b base bdi bdo big blockquote body br button
canvas caption cite code col colgroup data datalist dd del details dfn dialog div dl dt
em embed fieldset figcaption figure footer form h1 h2 h3 h4 h5 h6 head header hgroup hr
html i iframe img input ins kbd keygen label legend li link main map mark menu menuitem
meta meter nav noscript object ol optgroup option output p param picture pre progress q
rp rt ruby s samp script section select small source span strong style sub summary sup
table tbody td textarea tfoot th thead time title tr track u ul var video wbr
""".split()
_VOID_TAGS = """
area base br col embed hr img input keygen link menuitem meta param source track wbr
""".split()
_ATTRIBUTES = """
accept acceptCharset accessKey action allowFullScreen allowTransparency alt async
autoComplete autoFocus autoPlay capture cellPadding cellSpacing challenge charSet
checked classID className colSpan cols content contentEditable contextMenu controls
coords crossOrigin data dateTime default defer dir disabled download draggable encType
form formAction formEncType formMethod formNoValidate formTarget frameBorder headers
height hidden high href hrefLang htmlFor httpEquiv icon id inputMode integrity is
keyParams keyType kind label lang list loop low manifest marginHeight marginWidth max
maxLength media mediaGroup method min minLength multiple muted name noValidate nonce
open optimum pattern placeholder poster preload radioGroup readOnly rel required
reversed role rowSpan rows sandbox scope scoped scrolling seamless selected shape size
sizes span spellCheck src srcDoc srcLang srcSet start step style summary tabIndex target
title type useMap value width wmode wrap
""".split()
_STYLES = """
alignContent alignItems alignSelf animation animationDelay animationDirection
animationDuration animationFillMode animationIterationCount animationName
animationTimingFunction animationPlayState background backgroundAttachment
backgroundColor backgroundImage backgroundPosition backgroundRepeat backgroundClip
backgroundOrigin backgroundSize backfaceVisibility border borderBottom borderBottomColor
borderBottomLeftRadius borderBottomRightRadius borderBottomStyle borderBottomWidth
borderCollapse borderColor borderImage borderImageOutset borderImageRepeat
borderImageSlice borderImageSource borderImageWidth borderLeft borderLeftColor
borderLeftStyle borderLeftWidth borderRadius borderRight borderRightColor
borderRightStyle borderRightWidth borderSpacing borderStyle borderTop borderTopColor
borderTopLeftRadius borderTopRightRadius borderTopStyle borderTopWidth borderWidth
bottom boxDecorationBreak boxShadow boxSizing captionSide clear clip color columnCount
columnFill columnGap columnRule columnRuleColor columnRuleStyle columnRuleWidth columns
columnSpan columnWidth content counterIncrement counterReset cursor direction display
emptyCells filter flex flexBasis flexDirection flexFlow flexGrow flexShrink flexWrap
cssFloat font fontFamily fontSize fontStyle fontVariant fontWeight fontSizeAdjust
fontStretch hangingPunctuation height hyphens icon imageOrientation justifyContent
left letterSpacing lineHeight listStyle listStyleImage listStylePosition listStyleType
margin marginBottom marginLeft marginRight marginTop maxHeight maxWidth minHeight
minWidth navDown navIndex navLeft navRight navUp opacity order orphans outline
outlineColor outlineOffset outlineStyle outlineWidth overflow overflowX overflowY
padding paddingBottom paddingLeft paddingRight paddingTop pageBreakAfter pageBreakBefore
pageBreakInside perspective perspectiveOrigin position quotes resize right tableLayout
tabSize textAlign textAlignLast textDecoration textDecorationColor textDecorationLine
textDecorationStyle textIndent textJustify textOverflow textShadow textTransform top
transform transformOrigin transformStyle transition transitionProperty transitionDuration
transitionTimingFunction transitionDelay unicodeBidi verticalAlign visibility whiteSpace
width wordBreak wordSpacing wordWrap widows zIndex
""".split()
def _register_html_tags():
elements = {}
def clsdict(name):
def __init__(self, **props):
super(elements[name], self).__init__(name, props=props)
def _convert_props(self, **props):
props, props_items = {}, props.items()
for k, v in props_items:
# convert snakecase to camelcase for all props
k = re.sub(r'_([a-z])', lambda s: s.group(1).upper(), k)
# allow trailing underscore if a prop is a Python keyword
if k and k.endswith('_') and keyword.iskeyword(k[:-1]):
k = k[:-1]
if k == 'class':
k = 'className'
elif k == 'for':
k = 'htmlFor'
if k not in _ATTRIBUTES:
raise RuntimeError('unknown attribute: {}'.format(k))
if k in props:
raise RuntimeError('duplicate attribute: {}'.format(k))
# style attribute must be a dict
if k == 'style':
if not isinstance(v, collections.Mapping):
raise RuntimeError('invalid style: {}'.format(v))
v, v_items = {}, v.items()
for sk, sv in v_items:
# convert snakecase (dashes allowed) to camelcase
sk = re.sub(r'[\-_]([a-z])', lambda s: s.group(1).upper(), sk)
if sk not in _STYLES:
raise RuntimeError('unknown style: {}'.format(sk))
if sk in v:
raise RuntimeError('duplicate style: {}'.format(sk))
# only allow strings, integers and expressions for styles
if not isinstance(sv, (six.string_types, numbers.Real, expr)):
raise RuntimeError('invalid style: {}={}'.format(sk, sv))
v[sk] = sv
else:
# only allow strings or expressions for non-style attributes
if not isinstance(v, (six.string_types, expr)):
raise RuntimeError('invalid attribute: {}={}'.format(k, v))
props[k] = v
return props
return {
'__init__': __init__,
'__doc__': '<{}> HTML tag.'.format(name),
'_convert_props': _convert_props,
'allow_children': name not in _VOID_TAGS
}
for tag in _TAGS:
elements[tag] = type(tag, (Element,), clsdict(tag))
# register tag in the global namespace, append underscore if it's a Python keyword
globals()[tag + '_' * keyword.iskeyword(tag)] = elements[tag]
_register_html_tags()
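# Illustrative use (assuming the Element base class drives rendering and calls
# _convert_props): div(class_="panel", style={"margin_top": "8px"}) maps
# `class_` to className and `margin_top` to marginTop.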
| apache-2.0 | -8,047,939,656,042,221,000 | 52.669231 | 90 | 0.713057 | false | 4.76571 | false | false | false |
lafranceinsoumise/api-django | agir/groups/views.py | 1 | 26405 | import logging
from uuid import UUID
import ics
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.paginator import Paginator
from django.core.validators import validate_email
from django.db import IntegrityError
from django.db.models import Q
from django.http import (
Http404,
HttpResponseRedirect,
HttpResponseBadRequest,
JsonResponse,
HttpResponse,
HttpResponseForbidden,
)
from django.template.response import TemplateResponse
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views import View
from django.views.generic import (
UpdateView,
ListView,
DeleteView,
DetailView,
TemplateView,
)
from django.views.generic.edit import ProcessFormView, FormMixin, FormView
from agir.authentication.tokens import (
invitation_confirmation_token_generator,
abusive_invitation_report_token_generator,
)
from agir.authentication.utils import hard_login
from agir.authentication.view_mixins import (
HardLoginRequiredMixin,
PermissionsRequiredMixin,
VerifyLinkSignatureMixin,
)
from agir.donations.allocations import get_balance
from agir.donations.models import SpendingRequest
from agir.front.view_mixins import (
ObjectOpengraphMixin,
ChangeLocationBaseView,
SearchByZipcodeBaseView,
)
from agir.groups.actions.pressero import redirect_to_pressero, is_pressero_enabled
from agir.groups.actions.promo_codes import (
get_next_promo_code,
is_promo_code_delayed,
next_promo_code_date,
)
from agir.groups.models import SupportGroup, Membership, SupportGroupSubtype
from agir.groups.tasks import (
send_someone_joined_notification,
send_abuse_report_message,
)
from agir.lib.http import add_query_params_to_url
from agir.lib.utils import front_url
from agir.people.views import (
ConfirmSubscriptionView,
subscription_confirmation_token_generator,
Person,
)
from .forms import (
SupportGroupForm,
AddReferentForm,
AddManagerForm,
GroupGeocodingForm,
SearchGroupForm,
ExternalJoinForm,
InvitationWithSubscriptionConfirmationForm,
InvitationForm,
)
__all__ = [
"SupportGroupManagementView",
"CreateSupportGroupView",
"PerformCreateSupportGroupView",
"ModifySupportGroupView",
"QuitSupportGroupView",
"RemoveManagerView",
"SupportGroupDetailView",
"ThematicTeamsViews",
"ChangeGroupLocationView",
"SupportGroupListView",
]
logger = logging.getLogger(__name__)
class CheckMembershipMixin:
def user_is_referent(self):
return self.user_membership is not None and self.user_membership.is_referent
def user_is_manager(self):
return self.user_membership is not None and (
self.user_membership.is_referent or self.user_membership.is_manager
)
@property
def user_membership(self):
if not hasattr(self, "_user_membership"):
if isinstance(self.object, SupportGroup):
group = self.object
else:
group = self.object.supportgroup
try:
self._user_membership = group.memberships.get(
person=self.request.user.person
)
except Membership.DoesNotExist:
self._user_membership = None
return self._user_membership
class SupportGroupListView(SearchByZipcodeBaseView):
"""List of groups, filter by zipcode
"""
min_items = 20
template_name = "groups/group_list.html"
context_object_name = "groups"
form_class = SearchGroupForm
def get_base_queryset(self):
return SupportGroup.objects.active().order_by("name")
class SupportGroupDetailView(ObjectOpengraphMixin, DetailView):
template_name = "groups/detail.html"
queryset = SupportGroup.objects.active().all()
title_prefix = "Groupe d'action"
meta_description = "Rejoignez les groupes d'action de la France insoumise."
def get_template_names(self):
return ["groups/detail.html"]
def get_context_data(self, **kwargs):
events_future = Paginator(
self.object.organized_events.upcoming().distinct().order_by("start_time"), 5
).get_page(self.request.GET.get("events_future_page"))
events_past = Paginator(
self.object.organized_events.past().distinct().order_by("-start_time"), 5
).get_page(self.request.GET.get("events_past_page"))
return super().get_context_data(
events_future=events_future,
events_past=events_past,
is_member=self.request.user.is_authenticated
and self.object.memberships.filter(
person=self.request.user.person
).exists(),
is_referent_or_manager=self.request.user.is_authenticated
and self.object.memberships.filter(
Q(person=self.request.user.person)
& (Q(is_referent=True) | Q(is_manager=True))
).exists(),
**kwargs,
)
@method_decorator(login_required(login_url=reverse_lazy("short_code_login")))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
if not request.user.person.is_insoumise and not self.object.allow_external:
return HttpResponseForbidden()
if request.POST["action"] == "join":
try:
membership = Membership.objects.create(
supportgroup=self.object, person=request.user.person
)
send_someone_joined_notification.delay(membership.pk)
except IntegrityError:
pass # the person is already a member of the group
return HttpResponseRedirect(
reverse("view_group", kwargs={"pk": self.object.pk})
)
return HttpResponseBadRequest()
class SupportGroupIcsView(DetailView):
queryset = SupportGroup.objects.active().all()
def render_to_response(self, context, **response_kwargs):
calendar = ics.Calendar(
events=[
ics.event.Event(
name=event.name,
begin=event.start_time,
end=event.end_time,
uid=str(event.pk),
description=event.description,
location=event.short_address,
url=front_url("view_event", args=[event.pk], auto_login=False),
)
for event in context["supportgroup"].organized_events.all()
]
)
return HttpResponse(calendar, content_type="text/calendar")
class SupportGroupManagementView(
HardLoginRequiredMixin, CheckMembershipMixin, DetailView
):
template_name = "groups/manage.html"
queryset = SupportGroup.objects.active().all().prefetch_related("memberships")
messages = {
"add_referent_form": ugettext_lazy(
"{email} est maintenant correctement signalé comme second·e animateur·rice."
),
"add_manager_form": ugettext_lazy(
"{email} a bien été ajouté·e comme gestionnaire pour ce groupe."
),
"invitation_form": ugettext_lazy(
"{email} a bien été invité à rejoindre votre groupe."
),
}
need_referent_status = {"add_referent_form", "add_manager_form"}
active_panel = {
"add_referent_form": "animation",
"add_manager_form": "animation",
"invitation_form": "invitation",
}
def get_forms(self):
kwargs = {}
if self.request.method in ("POST", "PUT"):
kwargs.update({"data": self.request.POST})
return {
"add_referent_form": AddReferentForm(self.object, **kwargs),
"add_manager_form": AddManagerForm(self.object, **kwargs),
"invitation_form": InvitationForm(
group=self.object, inviter=self.request.user.person, **kwargs
),
}
def get_context_data(self, **kwargs):
kwargs["referents"] = self.object.memberships.filter(is_referent=True).order_by(
"created"
)
kwargs["managers"] = self.object.memberships.filter(
is_manager=True, is_referent=False
).order_by("created")
kwargs["members"] = self.object.memberships.all().order_by("created")
kwargs["has_promo_code"] = self.object.tags.filter(
label=settings.PROMO_CODE_TAG
).exists()
if kwargs["has_promo_code"]:
kwargs["group_promo_code"] = get_next_promo_code(self.object)
if is_promo_code_delayed():
kwargs["promo_code_delay"] = next_promo_code_date()
kwargs["certifiable"] = (
self.object.type in settings.CERTIFIABLE_GROUP_TYPES
or self.object.subtypes.filter(
label__in=settings.CERTIFIABLE_GROUP_SUBTYPES
).exists()
)
kwargs["satisfy_requirements"] = len(kwargs["referents"]) > 1
kwargs["allocation_balance"] = get_balance(self.object)
kwargs["spending_requests"] = SpendingRequest.objects.filter(
group=self.object
).exclude(status=SpendingRequest.STATUS_PAID)
kwargs["is_pressero_enabled"] = is_pressero_enabled()
kwargs["active"] = self.active_panel.get(self.request.POST.get("form"))
forms = self.get_forms()
for form_name, form in forms.items():
kwargs.setdefault(form_name, form)
return super().get_context_data(
is_referent=self.user_membership is not None
and self.user_membership.is_referent,
is_manager=self.user_membership is not None
and (self.user_membership.is_referent or self.user_membership.is_manager),
**kwargs,
)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
# only managers can access the page
if not self.user_is_manager():
raise PermissionDenied("Vous n'etes pas gestionnaire de ce groupe.")
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_name = request.POST.get("form")
# only referents can add referents and managers
if not self.user_is_referent() and form_name in self.need_referent_status:
raise PermissionDenied(
"Vous n'êtes pas animateur de ce groupe et ne pouvez donc pas modifier les "
"animateurs et gestionnaires."
)
forms = self.get_forms()
if form_name in forms:
form = forms[form_name]
if form.is_valid():
params = form.perform()
messages.add_message(
request, messages.SUCCESS, self.messages[form_name].format(**params)
)
else:
return self.render_to_response(
self.get_context_data(**{form_name: form})
)
return HttpResponseRedirect(
reverse("manage_group", kwargs={"pk": self.object.pk})
)
class CreateSupportGroupView(HardLoginRequiredMixin, TemplateView):
template_name = "groups/create.html"
def get_context_data(self, **kwargs):
person = self.request.user.person
initial = {}
if person.contact_phone:
initial["phone"] = person.contact_phone.as_e164
if person.first_name and person.last_name:
initial["name"] = "{} {}".format(person.first_name, person.last_name)
return super().get_context_data(props={"initial": initial}, **kwargs)
class PerformCreateSupportGroupView(HardLoginRequiredMixin, FormMixin, ProcessFormView):
model = SupportGroup
form_class = SupportGroupForm
def get_form_kwargs(self):
"""Add user person profile to the form kwargs"""
kwargs = super().get_form_kwargs()
person = self.request.user.person
kwargs["person"] = person
return kwargs
def form_invalid(self, form):
return JsonResponse({"errors": form.errors}, status=400)
def form_valid(self, form):
messages.add_message(
request=self.request,
level=messages.SUCCESS,
message="Votre groupe a été correctement créé.",
)
form.save()
return JsonResponse(
{
"status": "OK",
"id": form.instance.id,
"url": reverse("view_group", args=[form.instance.id]),
}
)
class ModifySupportGroupView(
HardLoginRequiredMixin, PermissionsRequiredMixin, UpdateView
):
permissions_required = ("groups.change_supportgroup",)
template_name = "groups/modify.html"
queryset = SupportGroup.objects.active().all()
form_class = SupportGroupForm
def get_form_kwargs(self):
"""Add user person profile to the form kwargs"""
return {**super().get_form_kwargs(), "person": self.request.user.person}
def get_success_url(self):
return reverse("manage_group", kwargs={"pk": self.object.pk})
def form_valid(self, form):
# first get response to make sure there's no error when saving the model before adding message
res = super().form_valid(form)
messages.add_message(
request=self.request,
level=messages.SUCCESS,
message=format_html(
_("Les modifications du groupe <em>{}</em> ont été enregistrées."),
self.object.name,
),
)
return res
class RemoveManagerView(HardLoginRequiredMixin, CheckMembershipMixin, DetailView):
template_name = "front/confirm.html"
queryset = (
Membership.objects.active()
.all()
.select_related("supportgroup")
.select_related("person")
)
def get_context_data(self, **kwargs):
person = self.object.person
if person.first_name and person.last_name:
name = "{} {} <{}>".format(
person.first_name, person.last_name, person.email
)
else:
name = person.email
return super().get_context_data(
title=_("Confirmer le retrait du gestionnaire ?"),
message=_(
f"""
Voulez-vous vraiment retirer {name} de la liste des gestionnaires de ce groupe ?
"""
),
button_text="Confirmer le retrait",
)
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if not self.user_is_referent():
raise PermissionDenied(
"Vous n'êtes pas animateur de cet événement et ne pouvez donc pas modifier les "
"animateurs et gestionnaires."
)
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
# user has to be referent, and target user cannot be a referent
if not self.user_is_referent() or self.object.is_referent:
raise PermissionDenied(
"Vous n'êtes pas animateur de cet événement et ne pouvez donc pas modifier les "
"animateurs et gestionnaires."
)
self.object.is_manager = False
self.object.save()
messages.add_message(
request,
messages.SUCCESS,
_("{} n'est plus un gestionnaire du groupe.").format(
self.object.person.email
),
)
return HttpResponseRedirect(
reverse_lazy("manage_group", kwargs={"pk": self.object.supportgroup_id})
)
class QuitSupportGroupView(HardLoginRequiredMixin, DeleteView):
template_name = "groups/quit.html"
success_url = reverse_lazy("dashboard")
queryset = Membership.objects.active().all()
context_object_name = "membership"
def get_object(self, queryset=None):
try:
return (
self.get_queryset()
.select_related("supportgroup")
.get(
supportgroup__pk=self.kwargs["pk"], person=self.request.user.person
)
)
except Membership.DoesNotExist:
raise Http404()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["group"] = self.object.supportgroup
context["success_url"] = self.get_success_url()
return context
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
# make sure user is not a referent who cannot quit groups
if (
self.object.is_referent
and len(self.object.supportgroup.memberships.filter(is_referent=True)) < 2
):
messages.add_message(
request,
messages.ERROR,
_(
"Vous êtes seul animateur⋅rice de ce groupe, et ne pouvez donc pas le quitter."
" Votre groupe doit d'abord se choisir un ou une autre animatrice pour permettre votre départ."
),
)
else:
self.object.delete()
messages.add_message(
request,
messages.SUCCESS,
format_html(
_("Vous avez bien quitté le groupe <em>{}</em>"),
self.object.supportgroup.name,
),
)
return HttpResponseRedirect(success_url)
class ExternalJoinSupportGroupView(ConfirmSubscriptionView, FormView, DetailView):
queryset = SupportGroup.objects.filter(subtypes__allow_external=True)
form_class = ExternalJoinForm
show_already_created_message = False
create_insoumise = False
def dispatch(self, request, *args, **kwargs):
self.group = self.object = self.get_object()
return super().dispatch(request, *args, **kwargs)
def success_page(self):
if Membership.objects.filter(
person=self.person, supportgroup=self.group
).exists():
messages.add_message(
request=self.request,
level=messages.INFO,
message=_("Vous êtes déjà membre."),
)
return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))
Membership.objects.get_or_create(person=self.person, supportgroup=self.group)
messages.add_message(
request=self.request,
level=messages.INFO,
message=_("Vous avez bien rejoint le groupe."),
)
return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))
def form_valid(self, form):
form.send_confirmation_email(self.group)
messages.add_message(
request=self.request,
level=messages.INFO,
message=_(
"Un email vous a été envoyé. Merrci de cliquer sur le "
"lien qu'il contient pour confirmer."
),
)
return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))
def form_invalid(self, form):
return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))
class ThematicTeamsViews(ListView):
template_name = "groups/thematic_teams.html"
context_object_name = "groups"
def get_queryset(self):
subtype = SupportGroupSubtype.objects.get(label="rédaction du livret")
return SupportGroup.objects.active().filter(subtypes=subtype).order_by("name")
def get_context_data(self, **kwargs):
return super().get_context_data(
**kwargs, default_image="front/images/AEC-mini.png"
)
class ChangeGroupLocationView(ChangeLocationBaseView):
template_name = "groups/change_location.html"
form_class = GroupGeocodingForm
queryset = SupportGroup.objects.active().all()
success_view_name = "manage_group"
class RedirectToPresseroView(HardLoginRequiredMixin, DetailView):
template_name = "groups/pressero_error.html"
queryset = SupportGroup.objects.active()
def get(self, request, *args, **kwargs):
group = self.get_object()
person = request.user.person
if not is_pressero_enabled():
raise Http404("Cette page n'existe pas")
if not group.is_certified:
raise Http404("Cette page n'existe pas")
if not Membership.objects.filter(
supportgroup=group, person=person, is_manager=True
).exists:
raise PermissionDenied("Vous ne pouvez pas accéder à cette page.")
try:
return redirect_to_pressero(person)
except Exception as e:
logger.error("Problème rencontré avec l'API Pressero", exc_info=True)
return TemplateResponse(request, self.template_name)
class InvitationConfirmationView(VerifyLinkSignatureMixin, View):
signature_generator = invitation_confirmation_token_generator
def get(self, request, *args, **kwargs):
token_params = self.get_signed_values()
if token_params is None:
return self.link_error_page()
try:
person = Person.objects.get(pk=UUID(token_params["person_id"]))
group = SupportGroup.objects.get(pk=UUID(token_params["group_id"]))
except (ValueError, Person.DoesNotExist):
return self.link_error_page()
except SupportGroup.DoesNotExist:
messages.add_message(
request=request,
level=messages.ERROR,
message="Le groupe qui vous a invité n'existe plus.",
)
return HttpResponseRedirect(reverse("dashboard"))
membership, created = Membership.objects.get_or_create(
supportgroup=group, person=person
)
if created:
messages.add_message(
request,
messages.SUCCESS,
format_html(
"Vous venez de rejoindre le groupe d'action <em>{group_name}</em>",
group_name=group.name,
),
)
else:
messages.add_message(
request, messages.INFO, "Vous étiez déjà membre de ce groupe."
)
return HttpResponseRedirect(reverse("view_group", args=(group.pk,)))
class InvitationWithSubscriptionView(VerifyLinkSignatureMixin, FormView):
form_class = InvitationWithSubscriptionConfirmationForm
signature_generator = subscription_confirmation_token_generator
signed_params = ["email", "group_id"]
template_name = "groups/invitation_subscription.html"
def dispatch(self, request, *args, **kwargs):
token_params = self.get_signed_values()
if not token_params:
return self.link_error_page()
self.email = token_params["email"]
try:
validate_email(self.email)
except ValidationError:
return self.link_error_page()
        # Special case: the person has already created an account in the meantime
        # ==> redirect to the other view
try:
person = Person.objects.get_by_natural_key(self.email)
except Person.DoesNotExist:
pass
else:
params = {"person_id": str(person.id), "group_id": token_params["group_id"]}
query_params = {
**params,
"token": invitation_confirmation_token_generator.make_token(**params),
}
return HttpResponseRedirect(
add_query_params_to_url(
reverse("invitation_confirmation"), query_params
)
)
try:
self.group = SupportGroup.objects.get(pk=UUID(token_params["group_id"]))
except ValueError:
            # not a UUID
return self.link_error_page()
except SupportGroup.DoesNotExist:
            # the group disappeared in the meantime...
self.group = None
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
return super().get_context_data(group=self.group)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["group"] = self.group
kwargs["email"] = self.email
return kwargs
def form_valid(self, form):
p = form.save()
hard_login(self.request, p)
return TemplateResponse(self.request, "people/confirmation_subscription.html")
class InvitationAbuseReportingView(VerifyLinkSignatureMixin, View):
signature_generator = abusive_invitation_report_token_generator
form_template_name = "groups/invitation_abuse.html"
confirmed_template_name = "groups/invitation_abuse_confirmed.html"
def dispatch(self, request, *args, **kwargs):
self.token_params = self.get_signed_values()
if not self.token_params:
return self.link_error_page()
self.timestamp = abusive_invitation_report_token_generator.get_timestamp(
request.GET.get("token")
)
try:
self.group_id = UUID(self.token_params["group_id"])
self.inviter_id = UUID(self.token_params["inviter_id"])
except ValueError:
return self.link_error_page()
return super().dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
return TemplateResponse(request, template=self.form_template_name)
def post(self, request, *args, **kwargs):
if self.inviter_id:
send_abuse_report_message.delay(str(self.inviter_id))
logger.info(
msg=f"Abus d'invitation signalé ({self.group_id}, {self.inviter_id}, {self.timestamp})"
)
return TemplateResponse(request, template=self.confirmed_template_name)
| agpl-3.0 | 4,024,145,903,745,246,000 | 32.830552 | 115 | 0.614556 | false | 4.013096 | false | false | false |
javiplx/cobbler-devel | django-webui/djangowebui/views.py | 1 | 25790 | from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
from django.http import HttpResponseRedirect
import xmlrpclib, time
my_uri = "http://127.0.0.1/cobbler_api"
remote = xmlrpclib.Server(my_uri)
token = remote.login('testing', 'testing')
def index(request):
t = get_template('index.tmpl')
html = t.render(Context({'version': remote.version(token)}))
return HttpResponse(html)
def search(request, what):
t = get_template('search.tmpl')
html = t.render(Context({'what':what, 'item_count':["1","2","3","4","5"]}))
return HttpResponse(html)
def dosearch(request, what):
criteria = {}
for i in range(1,6):
key = request.POST.get("key%d" % i, None)
val = request.POST.get("value%d" % i, None)
if key not in (None, ''):
if val != None:
val = val.replace('"','')
criteria[key] = val
results = []
if what == "distro":
results = remote.find_distro(criteria,True,token)
return distro_list(request, results)
elif what == "profile":
results = remote.find_profile(criteria,True,token)
return profile_list(request, results)
elif what == "system":
results = remote.find_system(criteria,True,token)
return system_list(request, results)
elif what == "image":
results = remote.find_image(criteria,True,token)
return image_list(request, results)
elif what == "repo":
results = remote.find_repo(criteria,True,token)
return repo_list(request, results)
else:
raise "internal error, unknown search type"
def __setup__pagination(object_list, page):
# TODO: currently hardcoded at 50 results per page
# not sure if this was a setting in the old webui
# (if not it should be)
prev_page = page - 1
next_page = page + 1
num_pages = (len(object_list)-1)/50 + 1
if num_pages > 1:
offset = (page-1) * 50
ending = offset + 50
if ending > len(object_list):
ending = len(object_list)
else:
offset = 0
ending = len(object_list)
if prev_page < 1:
prev_page = None
if next_page > num_pages:
next_page = None
return (num_pages,prev_page,next_page,offset,ending)
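# For instance, 120 items viewed on page 2 yield
# (num_pages, prev_page, next_page, offset, ending) == (3, 1, 3, 50, 100).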
def distro_list(request, distros=None, page=None):
if distros is None:
distros = remote.get_distros(token)
if page is None and len(distros) > 50:
return HttpResponseRedirect('/cobbler_web/distro/list/1')
try:
page = int(page)
if page < 1:
page = 1
except:
page = 1
(num_pages,prev_page,next_page,offset,ending) = __setup__pagination(distros,page)
if offset > len(distros):
return HttpResponseRedirect('/cobbler_web/distro/list/%d' % num_pages)
t = get_template('distro_list.tmpl')
html = t.render(Context({'what':'distro', 'distros': distros[offset:ending], 'page': page, 'pages': range(1,num_pages+1), 'next_page':next_page, 'prev_page':prev_page}))
return HttpResponse(html)
def distro_edit(request, distro_name=None):
available_arches = ['i386','x86','x86_64','ppc','ppc64','s390','s390x','ia64']
available_breeds = [['redhat','Red Hat Based'], ['debian','Debian'], ['ubuntu','Ubuntu'], ['suse','SuSE']]
distro = None
if not distro_name is None:
distro = remote.get_distro(distro_name, True, token)
distro['ctime'] = time.ctime(distro['ctime'])
distro['mtime'] = time.ctime(distro['mtime'])
t = get_template('distro_edit.tmpl')
html = t.render(Context({'distro': distro, 'available_arches': available_arches, 'available_breeds': available_breeds, "editable":True}))
return HttpResponse(html)
def distro_save(request):
# FIXME: error checking
field_list = ('name','comment','kernel','initrd','kopts','kopts','kopts_post','ksmeta','arch','breed','os_version','mgmt_classes','template_files','redhat_management_key','redhat_management_server')
new_or_edit = request.POST.get('new_or_edit','new')
editmode = request.POST.get('editmode', 'edit')
distro_name = request.POST.get('name', request.POST.get('oldname', None))
distro_oldname = request.POST.get('oldname', None)
if distro_name == None:
return HttpResponse("NO DISTRO NAME SPECIFIED")
if new_or_edit == 'new' or editmode == 'copy':
distro_id = remote.new_distro(token)
else:
if editmode == 'edit':
distro_id = remote.get_distro_handle(distro_name, token)
else:
if distro_name == distro_oldname:
return HttpResponse("The name was not changed, cannot %s" % editmode)
distro_id = remote.get_distro_handle(distro_oldname, token)
delete1 = request.POST.get('delete1', None)
delete2 = request.POST.get('delete2', None)
recursive = request.POST.get('recursive', False)
if new_or_edit == 'edit' and delete1 and delete2:
remote.remove_distro(distro_name, token, recursive)
return HttpResponseRedirect('/cobbler_web/distro/list')
else:
for field in field_list:
value = request.POST.get(field, None)
if field == 'name' and editmode == 'rename': continue
elif value != None:
remote.modify_distro(distro_id, field, value, token)
remote.save_distro(distro_id, token, new_or_edit)
if editmode == 'rename':
remote.rename_distro(distro_id, distro_name, token)
return HttpResponseRedirect('/cobbler_web/distro/edit/%s' % distro_name)
def profile_list(request, profiles=None, page=None):
if profiles is None:
profiles = remote.get_profiles(token)
if page is None and len(profiles) > 50:
return HttpResponseRedirect('/cobbler_web/profile/list/1')
try:
page = int(page)
if page < 1:
page = 1
except:
page = 1
(num_pages,prev_page,next_page,offset,ending) = __setup__pagination(profiles,page)
if offset > len(profiles):
return HttpResponseRedirect('/cobbler_web/profile/list/%d' % num_pages)
for profile in profiles:
if profile["kickstart"]:
if profile["kickstart"].startswith("http://") or profile["kickstart"].startswith("ftp://"):
profile["web_kickstart"] = profile.kickstart
elif profile["kickstart"].startswith("nfs://"):
profile["nfs_kickstart"] = profile.kickstart
t = get_template('profile_list.tmpl')
html = t.render(Context({'what':'profile', 'profiles': profiles[offset:ending], 'page': page, 'pages': range(1,num_pages+1), 'next_page':next_page, 'prev_page':prev_page}))
return HttpResponse(html)
def profile_edit(request, profile_name=None, subprofile=0):
available_virttypes = [['auto','Any'],['xenpv','Xen(pv)'],['xenfv','Xen(fv)'],['qemu','KVM/qemu'],['vmware','VMWare Server'],['vmwarew','VMWare WkStn']]
profile = None
if not profile_name is None:
profile = remote.get_profile(profile_name, True, token)
if profile.has_key('ctime'):
profile['ctime'] = time.ctime(profile['ctime'])
if profile.has_key('mtime'):
profile['mtime'] = time.ctime(profile['mtime'])
distros = remote.get_distros(token)
profiles = remote.get_profiles(token)
repos = remote.get_repos(token)
t = get_template('profile_edit.tmpl')
html = t.render(Context({'profile': profile, 'subprofile': subprofile, 'profiles': profiles, 'distros': distros, 'editable':True, 'available_virttypes': available_virttypes}))
return HttpResponse(html)
def profile_save(request):
# FIXME: error checking
field_list = ('name','parent','profile','distro','enable_menu','kickstart','kopts','kopts_post','virt_auto_boot','virt_file_size','virt_ram','ksmeta','template_files','repos','virt_path','virt_type','virt_bridge','virt_cpus','dhcp_tag','server','owners','mgmt_classes','comment','name_servers','name_servers_search','redhat_management_key','redhat_management_server')
new_or_edit = request.POST.get('new_or_edit','new')
editmode = request.POST.get('editmode', 'edit')
profile_name = request.POST.get('name', request.POST.get('oldname', None))
profile_oldname = request.POST.get('oldname', None)
if profile_name == None:
return HttpResponse("NO PROFILE NAME SPECIFIED")
subprofile = int(request.POST.get('subprofile','0'))
if new_or_edit == 'new' or editmode == 'copy':
if subprofile:
profile_id = remote.new_subprofile(token)
else:
profile_id = remote.new_profile(token)
else:
if editmode == 'edit':
profile_id = remote.get_profile_handle(profile_name, token)
else:
if profile_name == profile_oldname:
return HttpResponse("The name was not changed, cannot %s" % editmode )
profile_id = remote.get_profile_handle(profile_oldname, token)
delete1 = request.POST.get('delete1', None)
delete2 = request.POST.get('delete2', None)
recursive = request.POST.get('recursive', False)
if new_or_edit == 'edit' and delete1 and delete2:
remote.remove_profile(profile_name, token, recursive)
return HttpResponseRedirect('/cobbler_web/profile/list')
else:
for field in field_list:
value = request.POST.get(field, None)
if field == "distro" and subprofile: continue
elif field == "parent" and not subprofile: continue
elif field == "name" and editmode == "rename": continue
if value != None:
remote.modify_profile(profile_id, field, value, token)
remote.save_profile(profile_id, token, new_or_edit)
if editmode == "rename":
remote.rename_profile(profile_id, profile_name, token)
return HttpResponseRedirect('/cobbler_web/profile/edit/%s' % profile_name)
def system_list(request, systems=None, page=None):
if systems is None:
systems = remote.get_systems(token)
if page is None and len(systems) > 50:
return HttpResponseRedirect('/cobbler_web/system/list/1')
try:
page = int(page)
if page < 1:
page = 1
except:
page = 1
(num_pages,prev_page,next_page,offset,ending) = __setup__pagination(systems,page)
if offset > len(systems):
return HttpResponseRedirect('/cobbler_web/system/list/%d' % num_pages)
t = get_template('system_list.tmpl')
html = t.render(Context({'what':'system', 'systems': systems[offset:ending], 'page': page, 'pages': range(1,num_pages+1), 'next_page':next_page, 'prev_page':prev_page}))
return HttpResponse(html)
def system_edit(request, system_name=None, editmode="new"):
available_virttypes = [['<<inherit>>','<<inherit>>'],['auto','Any'],['xenpv','Xen(pv)'],['xenfv','Xen(fv)'],['qemu','KVM/qemu'],['vmware','VMWare Server'],['vmwarew','VMWare WkStn']]
available_power = ['','bullpap','wti','apc_snmp','ether-wake','ipmilan','drac','ipmitool','ilo','rsa','lpar','bladecenter','virsh','integrity']
system = None
if not system_name is None:
system = remote.get_system(system_name, True, token)
system['ctime'] = time.ctime(system['ctime'])
system['mtime'] = time.ctime(system['mtime'])
distros = remote.get_distros(token)
profiles = remote.get_profiles(token)
repos = remote.get_repos(token)
t = get_template('system_edit.tmpl')
html = t.render(Context({'system': system, 'profiles': profiles, 'distros': distros, 'repos': repos, 'editmode': editmode, 'available_virttypes': available_virttypes, 'available_power': available_power, 'editable':True}))
return HttpResponse(html)
def system_save(request):
# FIXME: error checking
field_list = ('name','profile','kopts','kopts_post','ksmeta','owners','netboot_enabled','server','virt_file_size','virt_cpus','virt_ram','virt_type','virt_path','virt_auto_boot','comment','power_type','power_user','power_pass','power_id','power_address','name_servers','name_servers_search','gateway','hostname','redhat_management_key','redhat_management_server','mgmt_classes')
interface_field_list = ('macaddress','ipaddress','dns_name','static_routes','static','virtbridge','dhcptag','subnet','bonding','bondingopts','bondingmaster','present','original')
editmode = request.POST.get('editmode', 'edit')
system_name = request.POST.get('name', request.POST.get('oldname', None))
system_oldname = request.POST.get('oldname', None)
interfaces = request.POST.get('interface_list', "").split(",")
if system_name == None:
return HttpResponse("NO SYSTEM NAME SPECIFIED")
if editmode == 'copy':
system_id = remote.new_system(token)
else:
if editmode == 'edit':
system_id = remote.get_system_handle(system_name, token)
else:
if system_name == system_oldname:
return HttpResponse("The name was not changed, cannot %s" % editmode)
system_id = remote.get_system_handle(system_oldname, token)
    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)
    recursive = request.POST.get('recursive', False)
    if delete1 and delete2:
        remote.remove_system(system_name, token, recursive)
return HttpResponseRedirect('/cobbler_web/system/list')
else:
for field in field_list:
value = request.POST.get(field, None)
if field == 'name' and editmode == 'rename': continue
elif value != None:
remote.modify_system(system_id, field, value, token)
for interface in interfaces:
ifdata = {}
for item in interface_field_list:
ifdata["%s-%s" % (item,interface)] = request.POST.get("%s-%s" % (item,interface), "")
if ifdata['present-%s' % interface] == "0" and ifdata['original-%s' % interface] == "1":
remote.modify_system(system_id, 'delete_interface', interface, token)
elif ifdata['present-%s' % interface] == "1":
remote.modify_system(system_id, 'modify_interface', ifdata, token)
remote.save_system(system_id, token, editmode)
if editmode == 'rename':
remote.rename_system(system_id, system_name, token)
return HttpResponseRedirect('/cobbler_web/system/edit/%s' % system_name)
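# Illustrative sketch (not part of the original view): the interface loop in
# system_save above expects the form to post one "<field>-<interface>" key per
# entry in interface_field_list, e.g. for interface_list = "eth0":
#
#   request.POST = {
#       'name': 'node01', 'profile': 'webserver', 'editmode': 'edit',
#       'interface_list': 'eth0',
#       'macaddress-eth0': 'AA:BB:CC:DD:EE:FF',
#       'ipaddress-eth0': '192.168.1.10',
#       'present-eth0': '1', 'original-eth0': '1',
#   }
#
# The field names and values here are hypothetical; only the
# "<field>-<interface>" naming convention is taken from the code above.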
def system_rename(request, system_name=None, system_newname=None):
if system_name == None:
return HttpResponse("You must specify a system to rename")
elif system_newname == None:
t = get_template('system_rename.tmpl')
html = t.render(Context({'system':system_name}))
return HttpResponse(html)
else:
system_id = remote.get_system_handle(system_name, token)
remote.rename_system(system_id, system_newname, token)
return HttpResponseRedirect("/cobbler_web/system/list")
def system_multi(request, multi_mode=None):
items = request.POST.getlist('items')
all_systems = remote.get_systems(token)
sel_systems = []
sel_names = []
for system in all_systems:
if system['name'] in items:
sel_systems.append(system)
sel_names.append(system['name'])
profiles = []
if multi_mode == "profile":
profiles = remote.get_profiles(token)
t = get_template('system_%s.tmpl' % multi_mode)
html = t.render(Context({'systems':sel_systems, 'profiles':profiles, 'items':sel_names}))
return HttpResponse(html)
def system_domulti(request, multi_mode=None):
items = request.POST.get('items', '').split(" ")
netboot_enabled = request.POST.get('netboot_enabled', None)
profile = request.POST.get('profile', None)
power = request.POST.get('power', None)
for system_name in items:
system_id = remote.get_system_handle(system_name, token)
if multi_mode == "delete":
remote.remove_system(system_name, token)
elif multi_mode == "netboot":
if netboot_enabled is None:
raise "Cannot modify systems without specifying netboot_enabled"
remote.modify_system(system_id, "netboot_enabled", netboot_enabled, token)
remote.save_system(system_id, token, "edit")
elif multi_mode == "profile":
if profile is None:
raise "Cannot modify systems without specifying profile"
remote.modify_system(system_id, "profile", profile, token)
remote.save_system(system_id, token, "edit")
elif multi_mode == "power":
if power is None:
raise "Cannot modify systems without specifying power option"
try:
remote.power_system(system_id, power, token)
except:
# TODO: something besides ignore. We should probably
# print out an error message at the top of whatever
# page we go to next, whether it's the system list
# or a results page
pass
else:
raise "Unknowm multiple operation on systems: %s" % str(multi_mode)
return HttpResponseRedirect("/cobbler_web/system/list")
def repo_list(request, repos=None, page=None):
if repos is None:
repos = remote.get_repos(token)
if page is None and len(repos) > 50:
return HttpResponseRedirect('/cobbler_web/repo/list/1')
try:
page = int(page)
if page < 1:
page = 1
except:
page = 1
(num_pages,prev_page,next_page,offset,ending) = __setup__pagination(repos,page)
if offset > len(repos):
return HttpResponseRedirect('/cobbler_web/repo/list/%d' % num_pages)
t = get_template('repo_list.tmpl')
html = t.render(Context({'what':'repo', 'repos': repos[offset:ending], 'page': page, 'pages': range(1,num_pages+1), 'next_page':next_page, 'prev_page':prev_page}))
return HttpResponse(html)
def repo_edit(request, repo_name=None):
available_arches = ['i386','x86','x86_64','ppc','ppc64','s390','s390x','ia64','noarch','src']
repo = None
if not repo_name is None:
repo = remote.get_repo(repo_name, True, token)
repo['ctime'] = time.ctime(repo['ctime'])
repo['mtime'] = time.ctime(repo['mtime'])
t = get_template('repo_edit.tmpl')
html = t.render(Context({'repo': repo, 'available_arches': available_arches, "editable":True}))
return HttpResponse(html)
def repo_save(request):
# FIXME: error checking
field_list = ('name','mirror','keep_updated','priority','mirror_locally','rpm_list','createrepo_flags','arch','yumopts','environment','owners','comment')
editmode = request.POST.get('editmode', 'edit')
repo_name = request.POST.get('name', request.POST.get('oldname', None))
repo_oldname = request.POST.get('oldname', None)
if repo_name == None:
return HttpResponse("NO SYSTEM NAME SPECIFIED")
if editmode == 'copy':
repo_id = remote.new_repo(token)
else:
if editmode == 'edit':
repo_id = remote.get_repo_handle(repo_name, token)
else:
if repo_name == repo_oldname:
return HttpResponse("The name was not changed, cannot %s" % editmode)
repo_id = remote.get_repo_handle(repo_oldname, token)
delete1 = request.POST.get('delete1', None)
delete2 = request.POST.get('delete2', None)
if delete1 and delete2:
remote.remove_repo(repo_name, token)
return HttpResponseRedirect('/cobbler_web/repo/list')
else:
for field in field_list:
value = request.POST.get(field, None)
if field == 'name' and editmode == 'rename': continue
elif field in ('keep_updated','mirror_locally'):
if field in request.POST:
remote.modify_repo(repo_id, field, "1", token)
else:
remote.modify_repo(repo_id, field, "0", token)
elif value != None:
remote.modify_repo(repo_id, field, value, token)
remote.save_repo(repo_id, token, editmode)
if editmode == 'rename':
remote.rename_repo(repo_id, repo_name, token)
return HttpResponseRedirect('/cobbler_web/repo/edit/%s' % repo_name)
def image_list(request, images=None, page=None):
if images is None:
images = remote.get_images(token)
if page is None and len(images) > 50:
return HttpResponseRedirect('/cobbler_web/image/list/1')
try:
page = int(page)
if page < 1:
page = 1
except:
page = 1
(num_pages,prev_page,next_page,offset,ending) = __setup__pagination(images,page)
if offset > len(images):
return HttpResponseRedirect('/cobbler_web/image/list/%d' % num_pages)
t = get_template('image_list.tmpl')
html = t.render(Context({'what':'image', 'images': images[offset:ending], 'page': page, 'pages': range(1,num_pages+1), 'next_page':next_page, 'prev_page':prev_page}))
return HttpResponse(html)
def image_edit(request, image_name=None):
available_arches = ['i386','x86_64']
available_breeds = [['redhat','Red Hat Based'], ['debian','Debian'], ['ubuntu','Ubuntu'], ['suse','SuSE']]
available_virttypes = [['auto','Any'],['xenpv','Xen(pv)'],['xenfv','Xen(fv)'],['qemu','KVM/qemu'],['vmware','VMWare Server'],['vmwarew','VMWare WkStn']]
available_imagetypes = ['direct','iso','memdisk','virt-clone']
image = None
if not image_name is None:
image = remote.get_image(image_name, True, token)
image['ctime'] = time.ctime(image['ctime'])
image['mtime'] = time.ctime(image['mtime'])
t = get_template('image_edit.tmpl')
html = t.render(Context({'image': image, 'available_arches': available_arches, 'available_breeds': available_breeds, 'available_virttypes': available_virttypes, 'available_imagetypes': available_imagetypes, "editable":True}))
return HttpResponse(html)
def image_save(request):
# FIXME: error checking
field_list = ('name','image_type','breed','os_version','arch','file','owners','virt_cpus','network_count','virt_file_size','virt_path','virt_bridge','virt_ram','virt_type','virt_auto_boot','comment')
editmode = request.POST.get('editmode', 'edit')
image_name = request.POST.get('name', request.POST.get('oldname', None))
image_oldname = request.POST.get('oldname', None)
if image_name == None:
return HttpResponse("NO SYSTEM NAME SPECIFIED")
if editmode == 'copy':
image_id = remote.new_image(token)
else:
if editmode == 'edit':
image_id = remote.get_image_handle(image_name, token)
else:
if image_name == image_oldname:
return HttpResponse("The name was not changed, cannot %s" % editmode)
image_id = remote.get_image_handle(image_oldname, token)
delete1 = request.POST.get('delete1', None)
delete2 = request.POST.get('delete2', None)
recursive = request.POST.get('recursive', False)
if delete1 and delete2:
remote.remove_image(image_name, token, recursive)
return HttpResponseRedirect('/cobbler_web/image/list')
else:
for field in field_list:
value = request.POST.get(field, None)
if field == 'name' and editmode == 'rename': continue
elif value != None:
remote.modify_image(image_id, field, value, token)
remote.save_image(image_id, token, editmode)
if editmode == 'rename':
remote.rename_image(image_id, image_name, token)
return HttpResponseRedirect('/cobbler_web/image/edit/%s' % image_name)
def ksfile_list(request, page=None):
ksfiles = remote.get_kickstart_templates(token)
if page is None and len(ksfiles) > 50:
return HttpResponseRedirect('/cobbler_web/ksfiles/list/1')
try:
page = int(page)
if page < 1:
page = 1
except:
page = 1
(num_pages,prev_page,next_page,offset,ending) = __setup__pagination(ksfiles,page)
if offset > len(ksfiles):
return HttpResponseRedirect('/cobbler_web/ksfiles/list/%d' % num_pages)
ksfile_list = []
for ksfile in ksfiles:
if ksfile.startswith("/var/lib/cobbler/kickstarts") or ksfile.startswith("/etc/cobbler"):
ksfile_list.append((ksfile,'editable'))
elif ksfile["kickstart"].startswith("http://") or ksfile["kickstart"].startswith("ftp://"):
ksfile_list.append((ksfile,'viewable'))
else:
ksfile_list.append((ksfile,None))
t = get_template('ksfile_list.tmpl')
html = t.render(Context({'what':'ksfile', 'ksfiles': ksfile_list[offset:ending], 'page': page, 'pages': range(1,num_pages+1), 'next_page':next_page, 'prev_page':prev_page}))
return HttpResponse(html)
def ksfile_edit(request, ksfile_name=None, editmode='edit'):
if editmode == 'edit':
editable = False
else:
editable = True
deleteable = False
ksdata = ""
if not ksfile_name is None:
editable = remote.check_access_no_fail(token, "modify_kickstart", ksfile_name)
deleteable = remote.is_kickstart_in_use(ksfile_name, token)
ksdata = remote.read_or_write_kickstart_template(ksfile_name, True, "", token)
t = get_template('ksfile_edit.tmpl')
html = t.render(Context({'ksfile_name':ksfile_name, 'deleteable':deleteable, 'ksdata':ksdata, 'editable':editable, 'editmode':editmode}))
return HttpResponse(html)
def ksfile_save(request):
# FIXME: error checking
editmode = request.POST.get('editmode', 'edit')
ksfile_name = request.POST.get('ksfile_name', None)
ksdata = request.POST.get('ksdata', "")
if ksfile_name == None:
return HttpResponse("NO KSFILE NAME SPECIFIED")
if editmode != 'edit':
ksfile_name = "/var/lib/cobbler/kickstarts/" + ksfile_name
delete1 = request.POST.get('delete1', None)
delete2 = request.POST.get('delete2', None)
if delete1 and delete2:
remote.read_or_write_kickstart_template(ksfile_name, False, -1, token)
return HttpResponseRedirect('/cobbler_web/ksfile/list')
else:
remote.read_or_write_kickstart_template(ksfile_name,False,ksdata,token)
return HttpResponseRedirect('/cobbler_web/ksfile/edit/%s' % ksfile_name)
def random_mac(request, virttype="xenpv"):
random_mac = remote.get_random_mac(virttype, token)
return HttpResponse(random_mac)
def dosync(request):
remote.sync(token)
return HttpResponseRedirect("/cobbler_web/")
| gpl-2.0 | -8,607,288,348,623,287,000 | 39.296875 | 381 | 0.647577 | false | 3.516978 | false | false | false |
mathLab/RBniCS | rbnics/problems/nonlinear_parabolic/nonlinear_parabolic_problem.py | 1 | 2210 | # Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.problems.base import NonlinearTimeDependentProblem
from rbnics.problems.nonlinear_elliptic import NonlinearEllipticProblem
from rbnics.backends import product, sum
NonlinearParabolicProblem_Base = NonlinearTimeDependentProblem(NonlinearEllipticProblem)
class NonlinearParabolicProblem(NonlinearParabolicProblem_Base):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call to parent
NonlinearParabolicProblem_Base.__init__(self, V, **kwargs)
# Form names for parabolic problems
self.terms.append("m")
self.terms_order.update({"m": 2})
class ProblemSolver(NonlinearParabolicProblem_Base.ProblemSolver):
def residual_eval(self, t, solution, solution_dot):
problem = self.problem
assembled_operator = dict()
assembled_operator["m"] = sum(product(problem.compute_theta("m"), problem.operator["m"]))
assembled_operator["a"] = sum(product(problem.compute_theta("a"), problem.operator["a"]))
assembled_operator["c"] = sum(product(problem.compute_theta("c"), problem.operator["c"]))
assembled_operator["f"] = sum(product(problem.compute_theta("f"), problem.operator["f"]))
return (assembled_operator["m"] * solution_dot
+ assembled_operator["a"] * solution
+ assembled_operator["c"]
- assembled_operator["f"])
def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
problem = self.problem
assembled_operator = dict()
assembled_operator["m"] = sum(product(problem.compute_theta("m"), problem.operator["m"]))
assembled_operator["a"] = sum(product(problem.compute_theta("a"), problem.operator["a"]))
assembled_operator["dc"] = sum(product(problem.compute_theta("dc"), problem.operator["dc"]))
return (assembled_operator["m"] * solution_dot_coefficient
+ assembled_operator["a"]
+ assembled_operator["dc"])
| lgpl-3.0 | -2,181,995,800,380,480,500 | 47.043478 | 104 | 0.646606 | false | 3.9254 | false | false | false |
azami/torimotsu | src/torimotsu/notofication.py | 1 | 2207 | # -*- coding: utf-8 -*-
from enum import Enum
from slackclient import SlackClient
from torimotsu import settings
class SendError(Exception):
def __init__(self, message):
self.message = message
class MealTimeEmoji(Enum):
fork_and_knife = 1
doughnut = 2
ramen = 3
ice_cream = 4
sake = 5
cookie = 6
class Notifier(SlackClient):
def __init__(self, log):
super().__init__(token=settings.slack.token)
self.log = log
self.channel = settings.slack.channel
def post_foods(self):
lines = ['{}のたべものきろく。'.format(self.log.yesterday.strftime('%Y-%m-%d'))]  # "Food record for <date>."
food_log = self.log.fetch_foods()
for (food_time, foods) in food_log.foods.items():
sum_calories = sum((food['loggedFood']['calories'] for food in foods))
lines.append(':{}: *{}* {}㌔㌍'.format(MealTimeEmoji(food_time.value).name,
food_time.name, sum_calories))
lines.append('```')
for food in foods:
lines.append('{calories:>4}㌔㌍ {name:25}: {amount}{unit_}'.format(
unit_=food['loggedFood']['unit']['plural'],
**food['loggedFood']))
lines.append('```')
lines.append('')
lines.append(':yum: *{}* ㌔㌍摂取したよ。'.format(food_log.summary['calories']))  # "You took in {} kcal."
if food_log.goals:
lines.append(':yum: *{}* ㌔㌍が上限目標だよ。'.format(food_log.goals['calories']))  # "{} kcal is the upper-limit goal."
diff = food_log.diff
if diff > 0:
lines.append(':innocent: *{}* ㌔㌍セーブしたよ。やったね。'.format(diff))  # "You saved {} kcal. Well done."
else:
lines.append(':imp: *{}* ㌔㌍余分にたべてしまいました。罪深い。'.format(diff * -1))  # "You ate {} kcal too much. Sinful."
else:
lines.append('目標㌍は取得できなかった。FitbitAPIバグっているのでは???')  # "Couldn't fetch the calorie goal. Is the Fitbit API buggy???"
self.send_slack('\n'.join(lines))
def send_slack(self, text):
response = self.api_call('chat.postMessage', channel=self.channel, text=text)
if not response['ok']:
raise SendError(response['message'])
| mit | -7,733,427,983,693,029,000 | 33.389831 | 85 | 0.54411 | false | 2.890313 | false | false | false |
the-raspberry-pi-guy/lidar | pi_approach/UI/lidar.py | 1 | 9853 | # lidar.py
# Code to control the touchscreen user interface subsystem
# Fully networked and touch enabled - with easy manipulation of generated maps
# Author: Matthew Timmons-Brown
# Import necessary libraries for control of different aspects
import socket
import math
import time
import sys
import subprocess
import threading
import random
# Import Kivy elements and tools that will be used for the user interface
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
# Import image manipulation tools
from PIL import Image, ImageDraw
# Import library that I have created to make communication and control easier
sys.path.insert(0, "/home/pi/lidar/pi_approach/Libraries")
import serverxclient as serv
powerdown = ["sudo", "shutdown", "now"]
server = serv.Server()
# Set the distance and stepper connection to false (as have not connected)
distance = False
stepper = False
# Initialise distance and stepper connections, but IP addresses so far unknown
distance_connection = 0
stepper_connection = 0
# Set accuracy limit for sensor, any value above it will be rejected (mm)
accuracy_limit = 4000
class Communication(threading.Thread):
"""A communication thread that connects to other subsystems in the background"""
# Run method - automatically run when thread is started
# Constantly waits for other two subsystems to come online, then changes to the main application page
def run(self):
self.setup()
# While either of the subsystems are not connected
while (distance == False) or (stepper == False):
(connection, address) = self.awaiting_socket()
print (connection, address)
self.test_socket(connection)
# Wait 2 seconds, then change to main screen
time.sleep(2)
application.current = "main"
# Setup method
# Sets up a server for subsystems to connect to
def setup(self):
server.setup_server()
print "SUCCESS ON BIND"
# Awaiting socket method
# Waits for an incoming socket and then returns that socket's connection and address details
def awaiting_socket(self):
print "AWAITING"
(connection, address) = server.socket_reception()
return (connection, address)
# Test socket
# Identifies which subsystem the incoming connection is and changes global variables to indicate correct pairing
def test_socket(self, connection):
# Demands verification from subsystem
server.send_data(connection,"VERIFY?")
data_back = server.receive_data(connection)
# If data_back is either subsystem, then change the Init screen labels from NO to OK!
if data_back == "DISTANCE!":
# set distance to OK
application.current_screen.distance_on()
# Update global variables with connection details
global distance, distance_connection
distance = True
distance_connection = connection
if data_back == "STEPPER!":
# set stepper to OK
application.current_screen.stepper_on()
# Update global variables with connection details
global stepper, stepper_connection
stepper = True
stepper_connection = connection
print "Finished testing socket"
class InitScreen(Screen):
"""A class to define the behaviour of the InitScreen"""
# Power off method
# If shutdown switch is toggled, turn off device
def power_off(self, *args):
# Connection to Kivy element through the use of labels
onoffswitch = self.ids["onoffswitch"]
onoff_value = onoffswitch.active
# If the switch is false, turn the system off
if onoff_value == False:
subprocess.call(powerdown)
# Distance ON! method
# Changes the "NO" distance label to "OK!" when called
def distance_on(self, *args):
distance_label = self.ids["distance_label"]
distance_label.text = "[size=40]Distance:[/size]\n\n[size=60][color=008000]OK[/color][/size]" # (Markup text)
# Stepper ON! method
# Changes the "NO" stepper label to "OK!" when called
def stepper_on(self, *args):
stepper_label = self.ids["stepper_label"]
stepper_label.text = "[size=40]Stepper:[/size]\n\n[size=60][color=008000]OK[/color][/size]" # (Markup text)
class MainScreen(Screen):
"""A class to define the behaviour of the MainScreen"""
# Current stepper motor angle
angle = 0
# Power off method
# If shutdown switch is toggled, turn off other subsystems and shut down this device
def power_off(self, *args):
onoffswitch = self.ids["onoffswitch2"]
onoff_value = onoffswitch.active
if onoff_value == False:
# Send commands to other subsystems and then shut down
server.send_data(distance_connection, "POWER-OFF")
server.send_data(stepper_connection, "POWER-OFF")
subprocess.call(powerdown)
# Change value method
# When the slider is changed, adapt the value label to reflect its value
def change_value(self, *args):
value_slider = self.ids["value_slider"]
self.angle = int(value_slider.value)
value_label = self.ids["value_label"]
# Change label to slider's current value
value_label.text = "[size=10]" + str(self.angle) + "[/size]"
# Scan method
# Called when the SCAN button is pressed
# Collects data from distance subsystem and stepper motor subsystem
# Outputs map to the user
def scan(self, *args):
enable_lidar = self.ids["enable_lidar"]
# If the lidar button is actually enabled, then proceed with the scan
if enable_lidar.state == "down":
print "Now contacting and getting data"
# Create arrays for the distances and angle that they were recorded at
distances = []
positions = []
# Create angle copy to reset when process has finished
angle_copy = self.angle
# For loop to discard the first 20 readings from the distance sensor
# The sensor is cheap and its first ~20 readings are usually inconsistent, so discard them
for i in range(0,20):
server.send_data(distance_connection, "FIRE")
discarded_response = server.receive_data(distance_connection)
time.sleep(0.1)
# While there is still an angle left to scan, do:
while self.angle+1.8 > 0:
# Demand distance from distance subsystem
server.send_data(distance_connection, "FIRE")
distance_response = server.receive_data(distance_connection)
# While the distance is greater than the accuracy limit and fewer than 3 attempts
# have been made, try again in the hope of getting better data.
tries = 0
while (float(distance_response[:-2]) > accuracy_limit) and (tries < 3):
server.send_data(distance_connection, "FIRE")
distance_response = server.receive_data(distance_connection)
tries += 1
# Demand current position of stepper motor, and then rotate by 1 step for the next distance
server.send_data(stepper_connection, "REPORT-ROTATE")
stepper_position = server.receive_data(stepper_connection)
# Convert the values into floats and remove unnecessary elements of communication
point_distance = float(distance_response[:-2])
point_position = float(stepper_position)
print (point_position, point_distance)
# If distance is within the accuracy_limit, store and record distance
# Otherwise distance is not recorded. This is to prevent outliers
if point_distance <= accuracy_limit:
distances.append(point_distance)
positions.append(point_position)
# -1.8 from angle as scan complete
self.angle -= 1.8
# Reset current angle
self.angle = angle_copy
# Draw map with the distances and position data that has been gathered
source = self.draw_map(distances, positions)
# Display the outputted PNG image to the user for manipulation and viewing
output_image = self.ids["output_image"]
output_image.source = source
else:
print "Nothing enabled"
# Draw map method
# Main map drawing algorithm - creates image from supplied distances and position data and returns path to that image
def draw_map(self, distance_array, angle_array):
# Dimensions for the image
dimensions = (700,380)
points = len(distance_array)-1
centre_x = dimensions[0]/2
centre_y = dimensions[1]/2
# Create a scaling factor for the end image to ensure points are within the allocated space
scaler = (centre_x+accuracy_limit)/dimensions[0]
# Open a new image with the dimensions previous
map = Image.new("RGBA", dimensions)
# Set image up for drawing
draw = ImageDraw.Draw(map)
# Draw a point in the centre of the image to represent where the scanner is
draw.point((centre_x, centre_y), (1,1,1))
# For all the pieces of data, do:
for i in range(0, points):
# Use trigonometry to calculate the position of the point to plot on map
sine_distance = (math.sin(math.radians(angle_array[i]))*(distance_array[i]))
cosi_distance = (math.cos(math.radians(angle_array[i]))*(distance_array[i]))
length_x = cosi_distance
length_y = sine_distance
# Divide by scaling factor to keep within the dimensions of the image
length_x = length_x/scaler
length_y = length_y/scaler
# Create set of coordinates to plot
coord_x = centre_x + length_x
coord_y = centre_y + length_y
coords = (coord_x, coord_y)
print coords
# Draw coordinates on map
draw.point(coords, (1,1,1))
# Create a new image path and return it
path = "/home/pi/lidar/pi_approach/UI/scans/" + str(random.randint(0,1000)) + ".png"
map.save(path, "PNG")
return path
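# Worked example for draw_map (assumed values, illustration only): with
# dimensions = (700, 380) and accuracy_limit = 4000, a single reading of
# 1000 mm taken at 90 degrees maps as follows:
#
#   scaler   = (350 + 4000) / 700                   # 6 under Python 2 integer division
#   length_x = cos(radians(90)) * 1000 / scaler     # ~0
#   length_y = sin(radians(90)) * 1000 / scaler     # ~166.7
#   coords   = (350 + 0, 190 + 166.7)               # plotted at roughly (350, 357)
#
# i.e. every (angle, distance) pair is converted from polar to cartesian
# coordinates and scaled down so the furthest accepted reading still fits
# inside the image.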
class ScreenManagement(ScreenManager):
"""Screen Manager - does behind-the-scenes screen management for transition between Init and Main screen"""
pass
# Load up Kivy file that defines how the UI looks
application = Builder.load_file("main.kv")
class LidarApp(App):
"""Build actual application and return it"""
def build(self):
return application
# If run, start communication thread and run the application
if __name__ == "__main__":
checker = Communication()
checker.daemon = True
checker.start()
LidarApp().run()
| mit | 7,342,553,851,189,937,000 | 34.829091 | 118 | 0.727697 | false | 3.581607 | false | false | false |
pallets/jinja2 | docs/conf.py | 1 | 1795 | from pallets_sphinx_themes import get_version
from pallets_sphinx_themes import ProjectLink
# Project --------------------------------------------------------------
project = "Jinja"
copyright = "2007 Pallets"
author = "Pallets"
release, version = get_version("Jinja2")
# General --------------------------------------------------------------
master_doc = "index"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"pallets_sphinx_themes",
"sphinxcontrib.log_cabinet",
"sphinx_issues",
]
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
issues_github_path = "pallets/jinja"
# HTML -----------------------------------------------------------------
html_theme = "jinja"
html_theme_options = {"index_sidebar_logo": False}
html_context = {
"project_links": [
ProjectLink("Donate to Pallets", "https://palletsprojects.com/donate"),
ProjectLink("Jinja Website", "https://palletsprojects.com/p/jinja/"),
ProjectLink("PyPI releases", "https://pypi.org/project/Jinja2/"),
ProjectLink("Source Code", "https://github.com/pallets/jinja/"),
ProjectLink("Issue Tracker", "https://github.com/pallets/jinja/issues/"),
]
}
html_sidebars = {
"index": ["project.html", "localtoc.html", "searchbox.html"],
"**": ["localtoc.html", "relations.html", "searchbox.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html"]}
html_static_path = ["_static"]
html_favicon = "_static/jinja-logo-sidebar.png"
html_logo = "_static/jinja-logo-sidebar.png"
html_title = f"Jinja Documentation ({version})"
html_show_sourcelink = False
# LaTeX ----------------------------------------------------------------
latex_documents = [(master_doc, f"Jinja-{version}.tex", html_title, author, "manual")]
| bsd-3-clause | -3,630,792,903,349,237,000 | 34.9 | 86 | 0.579944 | false | 3.582834 | false | true | false |
virtool/virtool | virtool/jobs/api.py | 2 | 3295 | import os
import virtool.api.utils
import virtool.http.routes
import virtool.jobs.db
import virtool.resources
import virtool.users.db
import virtool.utils
from virtool.api.response import conflict, json_response, no_content, not_found
routes = virtool.http.routes.Routes()
@routes.get("/api/jobs")
async def find(req):
"""
Return a list of job documents.
"""
db = req.app["db"]
term = req.query.get("find")
db_query = dict()
if term:
db_query.update(virtool.api.utils.compose_regex_query(term, ["task", "user.id"]))
data = await virtool.api.utils.paginate(
db.jobs,
db_query,
req.query,
projection=virtool.jobs.db.PROJECTION
)
data["documents"].sort(key=lambda d: d["created_at"])
return json_response(data)
@routes.get("/api/jobs/{job_id}")
async def get(req):
"""
Return the complete document for a given job.
"""
job_id = req.match_info["job_id"]
document = await req.app["db"].jobs.find_one(job_id)
if not document:
return not_found()
return json_response(virtool.utils.base_processor(document))
@routes.put("/api/jobs/{job_id}/cancel", permission="cancel_job")
async def cancel(req):
"""
Cancel a job.
"""
db = req.app["db"]
job_id = req.match_info["job_id"]
document = await db.jobs.find_one(job_id, ["status"])
if not document:
return not_found()
if not virtool.jobs.is_running_or_waiting(document):
return conflict("Not cancellable")
await req.app["jobs"].cancel(job_id)
document = await db.jobs.find_one(job_id)
return json_response(virtool.utils.base_processor(document))
@routes.delete("/api/jobs", permission="remove_job")
async def clear(req):
db = req.app["db"]
job_filter = req.query.get("filter")
# Remove jobs that completed successfully.
complete = job_filter in [None, "finished", "complete"]
# Remove jobs that errored or were cancelled.
failed = job_filter in [None, "finished", "failed"]
removed = await virtool.jobs.db.clear(db, complete=complete, failed=failed)
return json_response({
"removed": removed
})
@routes.delete("/api/jobs/{job_id}", permission="remove_job")
async def remove(req):
"""
Remove a job.
"""
db = req.app["db"]
job_id = req.match_info["job_id"]
document = await db.jobs.find_one(job_id)
if not document:
return not_found()
if virtool.jobs.is_running_or_waiting(document):
return conflict("Job is running or waiting and cannot be removed")
# Removed the documents associated with the job ids from the database.
await db.jobs.delete_one({"_id": job_id})
try:
# Calculate the log path and remove the log file. If it exists, return True.
path = os.path.join(req.app["settings"]["data_path"], "logs", "jobs", job_id + ".log")
await req.app["run_in_thread"](virtool.utils.rm, path)
except OSError:
pass
return no_content()
@routes.get("/api/resources")
async def get_resources(req):
"""
Get a object describing compute resource usage on the server.
"""
resources = virtool.resources.get()
req.app["resources"].update(resources)
return json_response(resources)
| mit | -2,214,268,685,578,211,800 | 22.535714 | 94 | 0.641882 | false | 3.479409 | false | false | false |
mattpatey/flash-card-generator | flashcardgenerator/generator.py | 1 | 3498 |
import argparse
import codecs
import logging
import os
import pickle
from pkg_resources import (
resource_string,
yield_lines,
)
import sys
import data
from rendering import CardRenderer
from translation import (
DictionaryParser,
NoTypeIndicatorException,
ParseException,
Translator,
UnknownVariantTypeException,
VariantParseException,
WordNotFoundException,
)
def parse_dictionary(dictionary_file):
"""
Create a data structure from the dictionary.
"""
entries = dict()
dict_parser = DictionaryParser()
with codecs.open(dictionary_file, 'r', encoding='utf-8') as lines:
for line in lines:
if line.startswith('#'):
continue
try:
word, translation = dict_parser.parse_line(line)
if not translation:
logger.error(u"Couldn't find translation for '%s'" % line)
continue
entry_key = unicode(word)
if not entries.get(entry_key):
entries[entry_key] = word, translation
else:
logger.info("Skipping duplicate entry for '%s'." % entry_key)
except ParseException, e:
logger.warn(u"Parse error: '%s'" % e)
except NoTypeIndicatorException, e:
logger.warn(u"Couldn't figure out word type for line: '%s'" % e)
except VariantParseException, e:
logger.warn(u"Couldn't parse some variants: '%s'" % e)
except UnknownVariantTypeException, e:
logger.warn(u"Not sure what a '%s' is." % e)
return entries
def create_dictionary_pickle():
logger.info("Creating dictionary pickle.")
d = parse_dictionary('flashcardgenerator/data/de-en.txt')
with open('dictionary.pkl', 'w') as f:
pickle.dump(d, f)
def translate(word, translator):
try:
return translator.lookup(word)
except WordNotFoundException:
logging.warn("Couldn't find translation for '%s'." % word)
raise
if __name__ == '__main__':
logger = logging.getLogger()
handler = logging.FileHandler('flash-card-generator.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
parser = argparse.ArgumentParser('Generate flash cards and lookup German to English translations.')
parser.add_argument('--word-file', type=str)
parser.add_argument('--lookup', type=str)
args = parser.parse_args()
if not os.path.exists('dictionary.pkl'):
create_dictionary_pickle()
with open('dictionary.pkl', 'r') as lookup_table_file:
lookup_table = pickle.load(lookup_table_file)
translator = Translator(lookup_table)
if args.lookup:
print translate(args.lookup.strip(), translator)
sys.exit(0)
if args.word_file:
words_and_translations = []
with codecs.open(args.word_file, 'r', encoding='utf-8') as lines:
for word in lines:
try:
original, translation = translate(word.strip(), translator)
except WordNotFoundException:
continue
words_and_translations.append((original, translation))
renderer = CardRenderer()
renderer.render_cards(words_and_translations, '/tmp/test.pdf')
sys.exit(0)
| gpl-2.0 | -6,273,199,768,317,407,000 | 30.513514 | 103 | 0.612922 | false | 4.318519 | false | false | false |
ntt-pf-lab/backup_keystone | keystone/test/functional/test_ext_raxkskey.py | 1 | 1053 | import unittest2 as unittest
from keystone.test.functional import common
class TestExtensions(common.ApiTestCase):
def test_extensions_json(self):
r = self.service_request(path='/extensions.json',
assert_status=200)
self.assertTrue('json' in r.getheader('Content-Type'))
content = r.json
self.assertIsNotNone(content['extensions'])
self.assertIsNotNone(content['extensions']['values'])
found = False
for value in content['extensions']['values']:
if value['extension']['alias'] == 'RAX-KSKEY':
found = True
break
self.assertTrue(found)
def test_extensions_xml(self):
r = self.service_request(path='/extensions.xml')
self.assertTrue('xml' in r.getheader('Content-Type'))
content = r.xml
extension = content.find(
"{http://docs.openstack.org/common/api/v2.0}extension")
self.assertEqual(extension.get("alias"), "RAX-KSKEY")
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 2,277,020,078,303,646,700 | 34.1 | 67 | 0.613485 | false | 4.195219 | true | false | false |
dustinlacewell/loom | loom/system.py | 1 | 1864 | from fabric.api import run, settings, sudo, open_shell, cd, env, local
from fabric.operations import put, prompt, get
def gethostip(hostname):
"get ip of hostname"
output = run('gethostip ' + hostname, True)
parts = output.split(' ')
return parts[1]
def run_daemon_cmd(name, command):
"run a daemon command"
run("/etc/init.d/%s %s" % (name, command))
def mount(mountpoint):
"mount specified mountpoint"
run("mount %s" % (mountpoint, ))
def unmount(mountpoint):
"unmount specified mountpoint"
run("umount %s" % (mountpoint, ))
def add_sshfs_mount(*args):
"install a list of sshfs mountpoints"
FSTAB_PATTERN = "sshfs#{host}:{remotepath}\t{mountpoint}\tfuse\tdefaults,allow_other,exec,reconnect,transform_symlinks\t0 0"
for mount in args:
host = mount['host']
remotepath = mount['remotepath']
mountpoint = mount['mountpoint']
excludes = mount['excludes']
if env.host in excludes:
print '%s is excluded from mountpoint.' % (env.host,)
continue
add_mount_point = True
tmp_path = '/tmp/fstab.tmp'
get("/etc/fstab", tmp_path)
fstab_entry = FSTAB_PATTERN.format(host=host,
remotepath=remotepath,
mountpoint=mountpoint,)
with open(tmp_path, 'r') as file:
for line in file.readlines():
if mountpoint in line:
add_mount_point = False
if add_mount_point:
with open(tmp_path, 'a') as file:
file.write(fstab_entry + "\n\n")
put(tmp_path, "/etc/fstab")
with settings(warn_only=True):
run('mkdir ' + mountpoint)
run('umount ' + mountpoint)
run('mount ' + mountpoint)
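# Hedged usage sketch (host and paths are made up; only the dict keys are
# taken from the function above):
#
#   add_sshfs_mount({
#       'host': 'storage01',
#       'remotepath': '/srv/shared',
#       'mountpoint': '/mnt/shared',
#       'excludes': ['storage01'],   # never mount a host onto itself
#   })
#
# Each mount dict is turned into an sshfs line in /etc/fstab on the target
# host, the mountpoint directory is created if needed, and the filesystem is
# (re)mounted.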
| mit | -2,859,233,683,230,947,300 | 32.285714 | 128 | 0.564914 | false | 3.859213 | false | false | false |
tundebabzy/frappe | frappe/data_migration/doctype/data_migration_plan/data_migration_plan.py | 11 | 2498 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.modules import get_module_path, scrub_dt_dn
from frappe.modules.export_file import export_to_files, create_init_py
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.model.document import Document
class DataMigrationPlan(Document):
def on_update(self):
# update custom fields in mappings
self.make_custom_fields_for_mappings()
if frappe.flags.in_import or frappe.flags.in_test:
return
if frappe.local.conf.get('developer_mode'):
record_list =[['Data Migration Plan', self.name]]
for m in self.mappings:
record_list.append(['Data Migration Mapping', m.mapping])
export_to_files(record_list=record_list, record_module=self.module)
for m in self.mappings:
dt, dn = scrub_dt_dn('Data Migration Mapping', m.mapping)
create_init_py(get_module_path(self.module), dt, dn)
def make_custom_fields_for_mappings(self):
frappe.flags.ignore_in_install = True
label = self.name + ' ID'
fieldname = frappe.scrub(label)
df = {
'label': label,
'fieldname': fieldname,
'fieldtype': 'Data',
'hidden': 1,
'read_only': 1,
'unique': 1,
'no_copy': 1
}
for m in self.mappings:
mapping = frappe.get_doc('Data Migration Mapping', m.mapping)
create_custom_field(mapping.local_doctype, df)
mapping.migration_id_field = fieldname
mapping.save()
# Create custom field in Deleted Document
create_custom_field('Deleted Document', df)
frappe.flags.ignore_in_install = False
def pre_process_doc(self, mapping_name, doc):
module = self.get_mapping_module(mapping_name)
if module and hasattr(module, 'pre_process'):
return module.pre_process(doc)
return doc
def post_process_doc(self, mapping_name, local_doc=None, remote_doc=None):
module = self.get_mapping_module(mapping_name)
if module and hasattr(module, 'post_process'):
return module.post_process(local_doc=local_doc, remote_doc=remote_doc)
def get_mapping_module(self, mapping_name):
try:
module_def = frappe.get_doc("Module Def", self.module)
module = frappe.get_module('{app}.{module}.data_migration_mapping.{mapping_name}'.format(
app= module_def.app_name,
module=frappe.scrub(self.module),
mapping_name=frappe.scrub(mapping_name)
))
return module
except ImportError:
return None
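# Illustrative note (hypothetical names): for a plan in module "Non Profit"
# belonging to an app "erpnext" and a mapping called "Member", the dotted
# path built in get_mapping_module above would be
#
#   erpnext.non_profit.data_migration_mapping.member
#
# and that module may optionally define pre_process(doc) and
# post_process(local_doc=..., remote_doc=...) hooks, which the plan calls in
# pre_process_doc / post_process_doc.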
| mit | 6,248,976,653,685,031,000 | 30.225 | 92 | 0.716573 | false | 3.198464 | false | false | false |
aipescience/queryparser | src/queryparser/common/common.py | 1 | 36829 | # -*- coding: utf-8 -*-
# All listeners that are shared, with minor modifications, between PostgreSQL
# and MySQL.
from __future__ import (absolute_import, print_function)
import logging
import re
import antlr4
from antlr4.error.ErrorListener import ErrorListener
from ..exceptions import QueryError, QuerySyntaxError
def parse_alias(alias, quote_char):
"""
Extract the alias if available.
:param alias:
antlr context
:parma quote_char:
which string quote character to use
"""
if alias:
alias = alias.ID().getText().strip(quote_char)
else:
alias = None
return alias
def process_column_name(column_name_listener, walker, ctx, quote_char):
'''
A helper function that strips the quote characters from the column
names. The returned list includes:
cn[0] - schema
cn[1] - table
cn[2] - column
cn[3] - ctx
:param column_name_listener:
column_name_listener object
:param walker:
antlr walker object
:param ctx:
antlr context to walk through
:param quote_char:
which quote character are we expecting?
'''
cn = []
column_name_listener.column_name = []
walker.walk(column_name_listener, ctx)
if column_name_listener.column_name:
for i in column_name_listener.column_name:
cni = [None, None, None, i]
if i.schema_name():
cni[0] = i.schema_name().getText().replace(quote_char, '')
if i.table_name():
cni[1] = i.table_name().getText().replace(quote_char, '')
if i.column_name():
cni[2] = i.column_name().getText().replace(quote_char, '')
cn.append(cni)
else:
try:
ctx.ASTERISK()
ts = ctx.table_spec()
cn = [[None, None, '*', None]]
if ts.schema_name():
cn[0][0] = ts.schema_name().getText().replace(quote_char, '')
if ts.table_name():
cn[0][1] = ts.table_name().getText().replace(quote_char, '')
except AttributeError:
cn = [[None, None, None, None]]
return cn
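# Illustrative example (assumed input): for a parsed column reference such as
# db."tab"."col" with quote_char = '"', the returned list would contain a
# single entry of the form
#
#   [['db', 'tab', 'col', <Column_specContext>]]
#
# while a bare tab.* in the select list comes back as
#
#   [[None, 'tab', '*', None]]
#
# (schema, table, column, antlr ctx); missing parts stay None.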
def get_column_name_listener(base):
class ColumnNameListener(base):
"""
Get all column names.
"""
def __init__(self):
self.column_name = []
self.column_as_array = []
def enterColumn_spec(self, ctx):
try:
if ctx.children[1].getText():
self.column_as_array.append(ctx)
else:
self.column_as_array.append(None)
except IndexError:
self.column_as_array.append(None)
self.column_name.append(ctx)
return ColumnNameListener
def get_table_name_listener(base, quote_char):
class TableNameListener(base):
"""
Get table names.
"""
def __init__(self):
self.table_names = []
self.table_aliases = []
def enterTable_atom(self, ctx):
self.table_names.append(ctx)
def enterAlias(self, ctx):
alias = parse_alias(ctx, quote_char)
self.table_aliases.append(alias)
return TableNameListener
def get_schema_name_listener(base, quote_char):
class SchemaNameListener(base):
def __init__(self, replace_schema_name):
self.replace_schema_name = replace_schema_name
def enterSchema_name(self, ctx):
ttype = ctx.start.type
sn = ctx.getTokens(ttype)[0].getSymbol().text
try:
nsn = self.replace_schema_name[sn.replace(quote_char, '')]
try:
nsn = unicode(nsn, 'utf-8')
except NameError:
pass
nsn = re.sub(r'(|{})(?!{})[\S]*[^{}](|{})'.format(
quote_char, quote_char, quote_char, quote_char),
r'\1{}\2'.format(nsn), sn)
ctx.getTokens(ttype)[0].getSymbol().text = nsn
except KeyError:
pass
return SchemaNameListener
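# Hedged example of what the listener above does (identifiers are made up):
# walking a parse tree with
#
#   replace_schema_name = {'user_alice': 'db_12345'}
#
# rewrites every schema token, e.g. "user_alice"."mytable" becomes
# "db_12345"."mytable" when quote_char is '"', keeping whatever quoting the
# original token used.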
def get_remove_subqueries_listener(base, base_parser):
class RemoveSubqueriesListener(base):
"""
Remove nested select_expressions.
"""
def __init__(self, depth):
self.depth = depth
def enterSelect_expression(self, ctx):
parent = ctx.parentCtx.parentCtx
if isinstance(parent, base_parser.SubqueryContext) and \
ctx.depth() > self.depth:
# we need to remove all Select_expression instances, not just
# the last one, so we loop until all of them have been removed
seinstances = [isinstance(i,
base_parser.Select_expressionContext)
for i in ctx.parentCtx.children]
while True in seinstances:
ctx.parentCtx.removeLastChild()
seinstances = [isinstance(i,
base_parser.Select_expressionContext)
for i in ctx.parentCtx.children]
return RemoveSubqueriesListener
def get_query_listener(base, base_parser, quote_char):
class QueryListener(base):
"""
Extract all select_expressions.
"""
def __init__(self):
self.select_expressions = []
self.select_list = None
self.keywords = []
self.subquery_aliases = {}
def enterSelect_statement(self, ctx):
if ctx.UNION_SYM():
self.keywords.append('union')
def enterSelect_expression(self, ctx):
# we need to keep track of unions as they act as subqueries
self.select_expressions.append(ctx)
parent = ctx.parentCtx.parentCtx
if isinstance(parent, base_parser.SubqueryContext):
try:
alias = parent.parentCtx.alias()
alias = parse_alias(alias, quote_char)
self.subquery_aliases[ctx] = alias
except AttributeError:
pass
def enterSelect_list(self, ctx):
if not self.select_list:
self.select_list = ctx
return QueryListener
def get_column_keyword_function_listener(base, quote_char):
class ColumnKeywordFunctionListener(base):
"""
Extract columns, keywords and functions.
"""
def __init__(self):
self.tables = []
self.columns = []
self.column_aliases = []
self.keywords = []
self.functions = []
self.column_name_listener = get_column_name_listener(base)()
self.table_name_listener = get_table_name_listener(
base, quote_char)()
self.walker = antlr4.ParseTreeWalker()
self.data = []
def _process_alias(self, ctx):
try:
alias = ctx.alias()
except AttributeError:
alias = None
alias = parse_alias(alias, quote_char)
return alias
def _extract_column(self, ctx, append=True, join_columns=False):
cn = process_column_name(self.column_name_listener, self.walker,
ctx, quote_char)
alias = self._process_alias(ctx)
if len(cn) > 1:
if join_columns:
columns = [[i, None, join_columns] for i in cn]
else:
columns = [[i, None] for i in cn]
else:
if join_columns:
columns = [[cn[0], alias, join_columns]]
else:
columns = [[cn[0], alias]]
if not append:
return alias, columns
if alias is not None:
self.column_aliases.append(alias)
if cn[0] not in self.column_aliases:
self.columns.extend(columns)
def enterTable_references(self, ctx):
self.walker.walk(self.table_name_listener, ctx)
tas = self.table_name_listener.table_aliases
if len(tas):
logging.info((ctx.depth(), ctx.__class__.__name__, tas))
self.data.append([ctx.depth(), ctx, tas])
else:
logging.info((ctx.depth(), ctx.__class__.__name__))
self.data.append([ctx.depth(), ctx])
def enterTable_atom(self, ctx):
alias = parse_alias(ctx.alias(), quote_char)
ts = ctx.table_spec()
if ts:
tn = [None, None]
if ts.schema_name():
tn[0] = ts.schema_name().getText().replace(quote_char, '')
if ts.table_name():
tn[1] = ts.table_name().getText().replace(quote_char, '')
self.tables.append((alias, tn, ctx.depth()))
logging.info((ctx.depth(), ctx.__class__.__name__,
[tn, alias]))
self.data.append([ctx.depth(), ctx, [tn, alias]])
def enterDisplayed_column(self, ctx):
logging.info((ctx.depth(), ctx.__class__.__name__,
self._extract_column(ctx, append=False)[1]))
self.data.append([ctx.depth(), ctx,
self._extract_column(ctx, append=False)[1]])
self._extract_column(ctx)
if ctx.ASTERISK():
self.keywords.append('*')
def enterSelect_expression(self, ctx):
logging.info((ctx.depth(), ctx.__class__.__name__))
self.data.append([ctx.depth(), ctx])
def enterSelect_list(self, ctx):
if ctx.ASTERISK():
logging.info((ctx.depth(), ctx.__class__.__name__,
[[None, None, '*'], None]))
self.data.append([ctx.depth(), ctx, [[[None, None, '*'],
None]]])
self.columns.append(('*', None))
self.keywords.append('*')
def enterFunctionList(self, ctx):
self.functions.append(ctx.getText())
def enterGroup_functions(self, ctx):
self.functions.append(ctx.getText())
def enterGroupby_clause(self, ctx):
self.keywords.append('group by')
col = self._extract_column(ctx, append=False)
if col[1][0][0][2] not in self.column_aliases:
self._extract_column(ctx)
logging.info((ctx.depth(), ctx.__class__.__name__,
self._extract_column(ctx, append=False)[1]))
self.data.append([ctx.depth(), ctx,
self._extract_column(ctx, append=False)[1]])
def enterWhere_clause(self, ctx):
self.keywords.append('where')
self._extract_column(ctx)
logging.info((ctx.depth(), ctx.__class__.__name__,
self._extract_column(ctx, append=False)[1]))
self.data.append([ctx.depth(), ctx,
self._extract_column(ctx, append=False)[1]])
def enterHaving_clause(self, ctx):
self.keywords.append('having')
self._extract_column(ctx)
logging.info((ctx.depth(), ctx.__class__.__name__,
self._extract_column(ctx, append=False)[1]))
self.data.append([ctx.depth(), ctx,
self._extract_column(ctx, append=False)[1]])
def enterOrderby_clause(self, ctx):
self.keywords.append('order by')
col = self._extract_column(ctx, append=False)
if col[1][0][0][2] not in self.column_aliases:
self._extract_column(ctx)
logging.info((ctx.depth(), ctx.__class__.__name__,
self._extract_column(ctx, append=False)[1]))
self.data.append([ctx.depth(), ctx,
self._extract_column(ctx, append=False)[1]])
def enterLimit_clause(self, ctx):
self.keywords.append('limit')
def enterJoin_condition(self, ctx):
self.keywords.append('join')
self._extract_column(ctx, join_columns=ctx)
logging.info((ctx.depth(), ctx.__class__.__name__,
self._extract_column(ctx, append=False)[1]))
self.data.append([ctx.depth(), ctx,
self._extract_column(ctx, append=False)[1]])
def enterSpoint(self, ctx):
self.functions.append('spoint')
def enterScircle(self, ctx):
self.functions.append('scircle')
def enterSline(self, ctx):
self.functions.append('sline')
def enterSellipse(self, ctx):
self.functions.append('sellipse')
def enterSbox(self, ctx):
self.functions.append('sbox')
def enterSpoly(self, ctx):
self.functions.append('spoly')
def enterSpath(self, ctx):
self.functions.append('spath')
def enterStrans(self, ctx):
self.functions.append('strans')
return ColumnKeywordFunctionListener
class SyntaxErrorListener(ErrorListener):
def __init__(self):
super(SyntaxErrorListener, self).__init__()
self.syntax_errors = []
def syntaxError(self, recognizer, offending_symbol, line, column, msg, e):
if offending_symbol is not None:
self.syntax_errors.append((line, column, offending_symbol.text))
else:
self.syntax_errors.append((line, column, msg))
class SQLQueryProcessor(object):
"""
Object used for processing MySQL/PostgreSQL queries. Its objective is query
validation (syntax error detection) and extraction of used columns,
keywords and functions.
:param base_lexer:
Base antlr Lexer class.
:param base_parser:
Base antlr Parser class.
:param base_parser_listener:
Base antlr ParserListener class.
:param quote_char:
Which character is used to quote strings?
:param query:
SQL query string.
:param base_sphere_listener:
Base sphere listener. For now only pg_sphere is supported but
other types of listeners can be added.
"""
def __init__(self, base_lexer, base_parser, base_parser_listener,
quote_char, query=None, base_sphere_listener=None):
self.lexer = base_lexer
self.parser = base_parser
self.parser_listener = base_parser_listener
self.quote_char = quote_char
self.sphere_listener = base_sphere_listener
self.walker = antlr4.ParseTreeWalker()
self.syntax_error_listener = SyntaxErrorListener()
self.columns = set()
self.keywords = set()
self.functions = set()
self.display_columns = []
if query is not None:
self.set_query(query)
self.process_query()
def _extract_instances(self, column_keyword_function_listener):
select_list_columns = []
other_columns = []
go_columns = []
column_aliases = []
select_list_tables = []
select_list_table_references = []
join = 0
join_using = None
# Keep track of the ctx stack
ctx_stack = []
for i in column_keyword_function_listener.data:
if isinstance(i[1], self.parser.Displayed_columnContext):
# this happens if there is an expression involving
# more columns
if len(i[2]) > 1:
for j in i[2]:
other_columns.append([j])
else:
select_list_columns.append(i[2])
alias = parse_alias(i[1].alias(), '"')
if alias is not None:
column_aliases.append(alias)
ctx_stack.append(i)
if isinstance(i[1], self.parser.Table_atomContext):
select_list_tables.append([i[2], i[0]])
ctx_stack.append(i)
if isinstance(i[1], self.parser.Table_referencesContext):
if len(i) > 2:
select_list_table_references.extend(i[2])
ctx_stack.append(i)
if isinstance(i[1], self.parser.Select_listContext):
if len(i) == 3:
select_list_columns.append([[i[2][0][0] + [i[1]],
i[2][0][1]]])
ctx_stack.append(i)
if isinstance(i[1], self.parser.Where_clauseContext) or\
isinstance(i[1], self.parser.Having_clauseContext):
if len(i[2]) > 1:
for j in i[2]:
other_columns.append([j])
else:
other_columns.append(i[2])
ctx_stack.append(i)
if isinstance(i[1], self.parser.Join_conditionContext):
join = i[0]
join_using = i[2]
if i[1].USING_SYM():
for ctx in ctx_stack[::-1]:
if not isinstance(ctx[1],
self.parser.Table_atomContext):
break
for ju in join_using:
if ju[0][1] is None:
other_columns.append([[[ctx[2][0][0],
ctx[2][0][1],
ju[0][2],
ctx[1]], None]])
elif i[1].ON():
if len(i[2]) > 1:
for j in i[2]:
other_columns.append([j])
ctx_stack.append(i)
if isinstance(i[1], self.parser.Orderby_clauseContext):
if len(i[2]) > 1:
for j in i[2]:
go_columns.append([j])
else:
go_columns.append(i[2])
ctx_stack.append(i)
if isinstance(i[1], self.parser.Groupby_clauseContext):
if len(i[2]) > 1:
for j in i[2]:
go_columns.append([j])
else:
go_columns.append(i[2])
ctx_stack.append(i)
return select_list_columns, select_list_tables,\
select_list_table_references, other_columns, go_columns, join,\
join_using, column_aliases
def _get_budget_column(self, c, tab, ref):
cname = c[0][2]
cctx = c[0][3]
calias = c[1]
t = tab
column_found = False
for bc in ref:
if bc[0][2] == '*':
t = [[bc[0][0], bc[0][1]], 'None']
column_found = True
break
elif bc[1] and c[0][2] == bc[1]:
t = [[bc[0][0], bc[0][1]], 'None']
cname = bc[0][2]
if c[1] is None:
calias = c[0][2]
column_found = True
break
elif c[0][2] == bc[0][2] and bc[1] is None:
t = [[bc[0][0], bc[0][1]], 'None']
column_found = True
break
return cname, cctx, calias, column_found, t
def _extract_columns(self, columns, select_list_tables, ref_dict, join,
budget, column_aliases, touched_columns=None,
subquery_contents=None):
# Here we store all columns that might have references somewhere
# higher up in the tree structure. We'll revisit them later.
missing_columns = []
remove_column_idxs = []
extra_columns = []
for i, col in enumerate(columns):
c = col[0]
cname = c[0][2]
cctx = c[0][3]
calias = c[1]
# a bare * (outside of a join) is expanded to one entry per table in the select list
if c[0][0] is None and c[0][1] is None and c[0][2] == '*'\
and not join:
for slt in select_list_tables:
extra_columns.append([[slt[0][0][0], slt[0][0][1], cname,
c[0][3]], calias])
remove_column_idxs.append(i)
continue
# this can happen for example in ... WHERE EXISTS ... clauses
if cname is None and calias is None:
remove_column_idxs.append(i)
continue
tab = [[None, None], None]
try:
tab = select_list_tables[0][0]
if tab[0][0] is None:
raise QueryError('Missing schema specification.')
# We have to check if we also have a join on the same level
# and we are actually touching a column from the joined table
if join and c[0][2] != '*' and\
(tab[1] != c[0][1] or
(tab[1] is None and c[0][1] is None)):
cname, cctx, calias, column_found, tab =\
self._get_budget_column(c, tab, budget[-1][2])
# raise an error if the column reference is ambiguous
if column_found and c[0][1] is None:
raise QueryError("Column '%s' is possibly ambiguous."
% c[0][2])
except IndexError:
pass
try:
# ref can be a table or a budget of columns
ref = ref_dict[c[0][1]]
column_found = False
if isinstance(ref[0], int):
# ref is a budget column
cname, cctx, calias, column_found, tab =\
self._get_budget_column(c, tab, ref[2])
ref_cols = [j[0][2] for j in ref[2]]
if not column_found and c[0][1] is not None\
and c[0][1] != tab[0][1] and '*' not in ref_cols:
raise QueryError("Unknown column '%s.%s'." % (c[0][1],
c[0][2]))
else:
# ref is a table
tab = ref[0]
except KeyError:
if None not in c[0][:3]:
cname = c[0][2]
cctx = c[0][3]
calias = c[1]
tab = [[c[0][0], c[0][1]]]
column_found = True
# table is either referenced directly or by an alias
elif c[0][2] is not None and c[0][1] is not None:
if subquery_contents is not None:
try:
contents = subquery_contents[c[0][1]]
cname, cctx, calias, column_found, tab =\
self._get_budget_column(c, tab, contents)
except KeyError:
tabs = [j[0][0][:2] for j in
subquery_contents.values()]
tabs += [j[0][0] for j in select_list_tables]
column_found = False
for t in tabs:
if t[1] == c[0][1]:
cname = c[0][2]
cctx = c[0][3]
calias = c[1]
tab = [t]
column_found = True
if not column_found:
missing_columns.append(c)
columns[i] = c
if touched_columns is not None:
touched_columns.append(c)
continue
else:
if tab[0][1] == c[0][1]:
columns[i] = [[tab[0][0], tab[0][1],
c[0][2], c[0][3]], c[1]]
else:
missing_columns.append(c)
columns[i] = c
if touched_columns is not None:
touched_columns.append(c)
continue
elif c[0][2] is not None and c[0][2] != '*' and c[0][1] is \
None and len(ref_dict.keys()) > 1 and not join:
raise QueryError("Column '%s' is ambiguous." % c[0][2])
elif len(budget) and tab[0][0] is None and tab[0][1] is None:
ref = budget[-1]
column_found = False
if isinstance(ref[0], int):
cname, cctx, calias, column_found, tab =\
self._get_budget_column(c, tab, ref[2])
# We allow None.None columns because they are produced
# by count(*)
if not column_found and c[0][2] is not None\
and c[0][2] not in column_aliases:
raise QueryError("Unknown column '%s'." % c[0][2])
if touched_columns is not None:
touched_columns.append([[tab[0][0], tab[0][1], cname, cctx],
calias])
else:
columns[i] = [[tab[0][0], tab[0][1], cname, c[0][3]], calias]
for i in remove_column_idxs[::-1]:
columns.pop(i)
columns.extend(extra_columns)
return missing_columns
def process_query(self, replace_schema_name=None, indexed_objects=None):
"""
Parses and processes the query. After a successful run it fills up
columns, keywords, functions and syntax_errors lists.
:param replace_schema_name:
A new schema name to be put in place of the original.
:param indexed_objects:
A dictionary defining pgsphere objects to be replaced with
precomputed (on the database level) columns. For example,
iob = {'spoint': ((('gdr2', 'gaia_source', 'ra'),
('gdr2', 'gaia_source', 'dec'), 'pos'),)}
will replace 'spoint(RADIANS(ra), RADIANS(dec))' with a 'pos'
column.
"""
# Antlr objects
inpt = antlr4.InputStream(self.query)
lexer = self.lexer(inpt)
stream = antlr4.CommonTokenStream(lexer)
parser = self.parser(stream)
lexer._listeners = [self.syntax_error_listener]
parser._listeners = [self.syntax_error_listener]
# Parse the query
tree = parser.query()
if len(self.syntax_error_listener.syntax_errors):
raise QuerySyntaxError(self.syntax_error_listener.syntax_errors)
if replace_schema_name is not None:
schema_name_listener = get_schema_name_listener(
self.parser_listener, self.quote_char)(replace_schema_name)
self.walker.walk(schema_name_listener, tree)
self._query = stream.getText()
query_listener = get_query_listener(self.parser_listener,
self.parser, self.quote_char)()
subquery_aliases = [None]
keywords = []
functions = []
tables = []
self.walker.walk(query_listener, tree)
keywords.extend(query_listener.keywords)
subquery_aliases = query_listener.subquery_aliases
# Columns that are accessed by the query
touched_columns = []
# List we use to propagate the columns through the tree
budget = []
# Are there any joins in the query?
join = 0
missing_columns = []
column_aliases = []
column_aliases_from_previous = []
subquery_contents = {}
# Iterate through subqueries starting with the lowest level
for ccc, ctx in enumerate(query_listener.select_expressions[::-1]):
remove_subqueries_listener = get_remove_subqueries_listener(
self.parser_listener, self.parser)(ctx.depth())
column_keyword_function_listener = \
get_column_keyword_function_listener(
self.parser_listener, self.quote_char)()
# Remove nested subqueries from select_expressions
self.walker.walk(remove_subqueries_listener, ctx)
# Extract table and column names, keywords, functions
self.walker.walk(column_keyword_function_listener, ctx)
keywords.extend(column_keyword_function_listener.keywords)
functions.extend(column_keyword_function_listener.functions)
            # Does the subquery have an alias?
try:
subquery_alias = subquery_aliases[ctx]
except KeyError:
subquery_alias = None
current_depth = column_keyword_function_listener.data[0][0]
# We get the columns from the select list along with all
# other touched columns and any possible join conditions
column_aliases_from_previous = [i for i in column_aliases]
select_list_columns, select_list_tables,\
select_list_table_references, other_columns, go_columns, join,\
join_using, column_aliases =\
self._extract_instances(column_keyword_function_listener)
tables.extend([i[0] for i in select_list_tables])
            # Then we need to connect the column names with tables and
# databases
ref_dict = {}
for ref in select_list_table_references:
ref_found = False
for tab in select_list_tables:
if ref == tab[0][1]:
ref_dict[ref] = tab
ref_found = True
if not ref_found:
for b in budget:
if ref == b[1]:
ref_dict[ref] = b
if not len(select_list_table_references):
for table in select_list_tables:
ref_dict[table[0][0][1]] = table
mc = self._extract_columns(select_list_columns, select_list_tables,
ref_dict, join, budget,
column_aliases_from_previous)
missing_columns.extend([[i] for i in mc])
touched_columns.extend(select_list_columns)
current_columns = [i for i in select_list_columns]
budget.append([current_depth, subquery_alias, select_list_columns])
aliases = [i[1] for i in select_list_columns] + column_aliases
for col in go_columns:
if col[0][0][2] not in aliases:
other_columns.append(col)
mc = self._extract_columns(other_columns, select_list_tables,
ref_dict, join, budget,
column_aliases_from_previous,
touched_columns)
missing_columns.extend([[i] for i in mc])
if join:
join_columns = []
join_columns.append(budget.pop(-1))
if len(join_using) == 1:
for tab in select_list_tables:
touched_columns.append([[tab[0][0][0], tab[0][0][1],
join_using[0][0][2]], None])
bp = []
for b in budget[::-1]:
if b[0] > current_depth:
bp.append(budget.pop(-1)[2])
budget.extend(join_columns)
if subquery_alias is not None:
subquery_contents[subquery_alias] = current_columns
if len(missing_columns):
mc = self._extract_columns(missing_columns, select_list_tables,
ref_dict, join, budget,
column_aliases_from_previous,
touched_columns, subquery_contents)
if len(mc):
unref_cols = "', '".join(['.'.join([j for j in i[0][:3] if j])
for i in mc])
raise QueryError("Unreferenced column(s): '%s'." % unref_cols)
# If we have indexed_objects, we are also accessing those. We
# need to add them into the columns stack:
if indexed_objects is not None:
for k, v in indexed_objects.items():
for vals in v:
touched_columns.append([[vals[0][0], vals[0][1], vals[2],
None], None])
touched_columns = set([tuple(i[0]) for i in touched_columns])
# extract display_columns
display_columns = []
mc = self._extract_columns([[i] for i in budget[-1][2]],
select_list_tables, ref_dict, join, budget,
column_aliases_from_previous,
display_columns, subquery_contents)
display_columns = [[i[1] if i[1] else i[0][2], i[0]]
for i in display_columns]
        # Drop all columns that are already covered by a db.tab.* wildcard.
        # TODO: find a cleaner way to do this and replace the code below.
asterisk_columns = []
del_columns = []
for col in touched_columns:
if col[2] == '*':
asterisk_columns.append(col)
for acol in asterisk_columns:
for col in touched_columns:
if acol[0] == col[0] and acol[1] == col[1] and \
acol[2] != col[2]:
del_columns.append(col)
columns = list(set(touched_columns).difference(del_columns))
self.columns = list(set([self._strip_column(i) for i in columns]))
self.keywords = list(set(keywords))
self.functions = list(set(functions))
self.display_columns = [(i[0].lstrip('"').rstrip('"'),
list(self._strip_column(i[1])))
for i in display_columns]
self.tables = list(set([tuple([i[0][0].lstrip('"').rstrip('"')
if i[0][0] is not None else i[0][0],
i[0][1].lstrip('"').rstrip('"')
if i[0][1] is not None else i[0][1]])
for i in tables]))
# If there are any sphere-like objects (pgsphere...) that are indexed
# we need to replace the ADQL translated query parts with the indexed
# column names
if indexed_objects is not None and self.sphere_listener is not None:
# we need to correctly alias 'pos' columns
for k, v in indexed_objects.items():
indexed_objects[k] = list([list(i) for i in v])
for i, vals in enumerate(v):
for t in tables:
if vals[0][0] == t[0][0] and vals[0][1] == t[0][1] and\
t[1] is not None:
indexed_objects[k][i][2] = t[1] + '.' +\
indexed_objects[k][i][2]
sphere_listener = self.sphere_listener(columns, indexed_objects)
self.walker.walk(sphere_listener, tree)
for k, v in sphere_listener.replace_dict.items():
self._query = self._query.replace(k, v)
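    # Illustrative end-to-end sketch. The processor subclass name and the query
    # are assumptions; the query is supplied via set_query() rather than passed
    # to process_query() directly:
    #   qp = SomeQueryProcessor()   # hypothetical concrete subclass
    #   qp.set_query("SELECT ra, dec FROM gdr2.gaia_source")
    #   qp.process_query(indexed_objects={'spoint': ((('gdr2', 'gaia_source',
    #       'ra'), ('gdr2', 'gaia_source', 'dec'), 'pos'),)})
    #   qp.columns, qp.display_columns, qp.tables   # populated after the call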
@property
def query(self):
"""
Get the query string.
"""
return self._query
def _strip_query(self, query):
return query.lstrip('\n').rstrip().rstrip(';') + ';'
def _strip_column(self, col):
scol = [None, None, None]
for i in range(3):
if col[i] is not None:
scol[i] = col[i].lstrip('"').rstrip('"')
return tuple(scol)
def set_query(self, query):
"""
Helper to set the query string.
"""
self.columns = set()
self.keywords = set()
self.functions = set()
self.display_columns = []
self.syntax_error_listener = SyntaxErrorListener()
self._query = self._strip_query(query)
| apache-2.0 | 2,043,449,946,241,649,200 | 36.126008 | 79 | 0.486302 | false | 4.34663 | false | false | false |
aagusti/osipkd-json-rpc | jsonrpc/tools.py | 1 | 6381 | import os
import re
from types import (
StringType,
UnicodeType,
BooleanType,
LongType,
IntType,
)
from decimal import Decimal
from datetime import (
datetime,
timedelta,
date
)
import locale
import pytz
from pyramid.threadlocal import get_current_registry
kini = datetime.now()
DateType = type(kini.date())
DateTimeType = type(kini)
TimeType = type(kini.time())
DecimalType = type(Decimal(0))
def dmy(tgl):
return tgl.strftime('%d-%m-%Y')
def dmyhms(t):
return t.strftime('%d-%m-%Y %H:%M:%S')
def hms(t):
return t.strftime('%H:%M:%S')
def to_simple_value(v):
typ = type(v)
if typ is DateType:
return v.isoformat() #dmy(v)
if typ is DateTimeType:
return v.isoformat() #dmyhms(v)
if typ is TimeType:
return hms(v)
if typ is DecimalType:
return float(v)
if typ in (LongType, IntType):
#if v < MININT or v > MAXINT:
return str(v)
if v == 0:
return '0'
if typ in [UnicodeType, StringType]:
return v.strip()
if v is None:
return ''
return v
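# Examples:
#   to_simple_value(Decimal('10.5'))  -> 10.5
#   to_simple_value(date(2020, 1, 2)) -> '2020-01-02'
#   to_simple_value(None)             -> ''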
def dict_to_simple_value(d):
r = {}
for key in d:
val = d[key]
if type(key) not in (UnicodeType, StringType):
key = str(key)
r[key] = to_simple_value(val)
return r
def date_from_str(value):
separator = None
value = value.split()[0] # dd-mm-yyyy HH:MM:SS
for s in ['-', '/']:
if value.find(s) > -1:
separator = s
break
if separator:
t = map(lambda x: int(x), value.split(separator))
y, m, d = t[2], t[1], t[0]
if d > 999: # yyyy-mm-dd
y, d = d, y
else: # if len(value) == 8: # yyyymmdd
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:])
return date(y, m, d)
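# Examples of the accepted input formats:
#   date_from_str('31-12-2020')          -> date(2020, 12, 31)
#   date_from_str('2020/12/31 10:15:00') -> date(2020, 12, 31)
#   date_from_str('20201231')            -> date(2020, 12, 31)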
################
# Phone number #
################
MSISDN_ALLOW_CHARS = map(lambda x: str(x), range(10)) + ['+']
def get_msisdn(msisdn, country='+62'):
for ch in msisdn:
if ch not in MSISDN_ALLOW_CHARS:
return
try:
i = int(msisdn)
except ValueError, err:
return
if not i:
return
if len(str(i)) < 7:
return
if re.compile(r'^\+').search(msisdn):
return msisdn
if re.compile(r'^0').search(msisdn):
return '%s%s' % (country, msisdn.lstrip('0'))
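# Examples:
#   get_msisdn('08123456789')    -> '+628123456789'
#   get_msisdn('+6281234567890') -> '+6281234567890'
#   get_msisdn('0812-345-678')   -> None (contains a character other than 0-9 or +)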
################
# Money format #
################
def should_int(value):
int_ = int(value)
return int_ == value and int_ or value
def thousand(value, float_count=None):
if float_count is None: # autodetection
if type(value) in (IntType, LongType):
float_count = 0
else:
float_count = 2
return locale.format('%%.%df' % float_count, value, True)
def money(value, float_count=None, currency=None):
if value < 0:
v = abs(value)
format_ = '(%s)'
else:
v = value
format_ = '%s'
if currency is None:
currency = locale.localeconv()['currency_symbol']
s = ' '.join([currency, thousand(v, float_count)])
return format_ % s
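# Examples, assuming an Indonesian locale has been activated via
# locale.setlocale() (the separators depend on the active locale):
#   thousand(1234567.891)  -> '1.234.567,89'
#   money(-50000, 0, 'Rp') -> '(Rp 50.000)'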
###########
# Pyramid #
###########
def get_settings():
return get_current_registry().settings
def get_timezone():
settings = get_settings()
return pytz.timezone(settings.timezone)
########
# Time #
########
one_second = timedelta(1.0/24/60/60)
TimeZoneFile = '/etc/timezone'
if os.path.exists(TimeZoneFile):
DefaultTimeZone = open(TimeZoneFile).read().strip()
else:
DefaultTimeZone = 'Asia/Jakarta'
def as_timezone(tz_date):
localtz = get_timezone()
if not tz_date.tzinfo:
tz_date = create_datetime(tz_date.year, tz_date.month, tz_date.day,
tz_date.hour, tz_date.minute, tz_date.second,
tz_date.microsecond)
return tz_date.astimezone(localtz)
def create_datetime(year, month, day, hour=0, minute=7, second=0,
microsecond=0):
tz = get_timezone()
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo=tz)
def create_date(year, month, day):
return create_datetime(year, month, day)
def create_now():
tz = get_timezone()
return datetime.now(tz)
##############
# Fix Length #
##############
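# NOTE: get_raw() in FixLength below relies on two padding helpers that are
# not defined in this module as shown. The implementations here are assumed
# ones, inferred from how they are used: 'A' (alphanumeric) fields are
# left-justified and space-padded, 'N' (numeric) fields are right-justified
# and zero-padded, both truncated to the field size.
def left(value, size):
    return str(value)[:size].ljust(size)
def right(value, size):
    return str(value)[-size:].rjust(size, '0')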
class FixLength(object):
def __init__(self, struct):
self.set_struct(struct)
def set_struct(self, struct):
self.struct = struct
self.fields = {}
new_struct = []
for s in struct:
name = s[0]
size = s[1:] and s[1] or 1
typ = s[2:] and s[2] or 'A' # N: numeric, A: alphanumeric
self.fields[name] = {'value': None, 'type': typ, 'size': size}
new_struct.append((name, size, typ))
self.struct = new_struct
def set(self, name, value):
self.fields[name]['value'] = value
def get(self, name):
return self.fields[name]['value']
def __setitem__(self, name, value):
self.set(name, value)
def __getitem__(self, name):
return self.get(name)
def get_raw(self):
s = ''
for name, size, typ in self.struct:
v = self.fields[name]['value']
pad_func = typ == 'N' and right or left
if typ == 'N':
v = v or 0
i = int(v)
if v == i:
v = i
else:
v = v or ''
s += pad_func(v, size)
return s
def set_raw(self, raw):
awal = 0
for t in self.struct:
name = t[0]
size = t[1:] and t[1] or 1
akhir = awal + size
value = raw[awal:akhir]
if not value:
return
self.set(name, value)
awal += size
return True
def from_dict(self, d):
for name in d:
value = d[name]
self.set(name, value)
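# Minimal FixLength usage sketch (field names and sizes are illustrative and
# the exact raw string depends on the assumed left/right helpers above):
#   rec = FixLength([('bank', 4), ('amount', 10, 'N')])
#   rec['bank'] = 'BNI'
#   rec['amount'] = 2500
#   rec.get_raw()                   # -> 'BNI 0000002500'
#   rec.set_raw('BNI 0000002500')   # parses the raw string back into fields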
############
# Database #
############
def split_tablename(tablename):
t = tablename.split('.')
if t[1:]:
schema = t[0]
tablename = t[1]
else:
schema = None
return schema, tablename
###########
# Pyramid #
###########
def get_settings():
reg = get_current_registry()
return reg.settings
| lgpl-2.1 | -195,858,637,387,680,320 | 24.122047 | 79 | 0.51622 | false | 3.447326 | false | false | false |
jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/selenium/test_csv_import.py | 1 | 19211 | # vim: set fileencoding=utf-8 :
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from bkr.inttest.server.selenium import WebDriverTestCase
from bkr.inttest.server.webdriver_utils import login, logout, is_text_present
from bkr.inttest import data_setup, get_server_base, with_transaction
from bkr.inttest.assertions import assert_has_key_with_value
from bkr.server.model import Arch, System, OSMajor, SystemPermission, \
SystemStatus
from turbogears.database import session
import pkg_resources
import unittest2 as unittest
from tempfile import NamedTemporaryFile
from decimal import Decimal
class CSVImportTest(WebDriverTestCase):
def setUp(self):
with session.begin():
self.system = data_setup.create_system(
lab_controller=data_setup.create_labcontroller())
self.browser = self.get_browser()
def import_csv(self, contents):
b = self.browser
b.get(get_server_base() + 'csv/csv_import')
csv_file = NamedTemporaryFile(prefix=self.__module__)
csv_file.write(contents)
csv_file.flush()
b.find_element_by_name('csv_file').send_keys(csv_file.name)
b.find_element_by_name('csv_file').submit()
def test_system(self):
login(self.browser)
orig_date_modified = self.system.date_modified
self.import_csv((u'csv_type,fqdn,location,arch\n'
u'system,%s,Under my desk,ia64' % self.system.fqdn)
.encode('utf8'))
self.failUnless(is_text_present(self.browser, "No Errors"))
with session.begin():
session.refresh(self.system)
self.assertEquals(self.system.location, u'Under my desk')
self.assert_(Arch.by_name(u'ia64') in self.system.arch)
self.assert_(self.system.date_modified > orig_date_modified)
# attempting to import a system with no FQDN should fail
self.import_csv((u'csv_type,fqdn,location,arch\n'
u'system,'',Under my desk,ia64').encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
"Error importing line 2: "
"System must have an associated FQDN")
# attempting to import a system with an invalid FQDN should fail
self.import_csv((u'csv_type,fqdn,location,arch\n'
u'system,invalid--fqdn,Under my desk,ia64').encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
"Error importing line 2: "
"Invalid FQDN for system: invalid--fqdn")
#https://bugzilla.redhat.com/show_bug.cgi?id=987157
def test_system_rename(self):
login(self.browser)
# attempt to rename existing system to an invalid FQDN should keep
# the system unmodified
with session.begin():
session.refresh(self.system)
orig_date_modified = self.system.date_modified
self.import_csv((u'csv_type,id,fqdn,location,arch\n'
u'system,%s,new--fqdn.name,%s,%s' % (self.system.id,
self.system.location, self.system.arch[0])).encode('utf8'))
with session.begin():
session.refresh(self.system)
self.assertEquals(self.system.date_modified, orig_date_modified)
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
"Error importing line 2: "
"Invalid FQDN for system: new--fqdn.name")
# attempt to rename a non-existent system should fail
orig_date_modified = self.system.date_modified
non_existent_system_id = -1
self.import_csv((u'csv_type,id,fqdn,location,arch\n'
u'system,%s,new--fqdn.name,%s,%s' % (non_existent_system_id,
self.system.location, self.system.arch[0])).encode('utf8'))
with session.begin():
session.refresh(self.system)
self.assertEquals(self.system.date_modified, orig_date_modified)
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
"Error importing line 2: "
"Non-existent system id")
# successfully rename existing system
orig_date_modified = self.system.date_modified
self.import_csv((u'csv_type,id,fqdn,location,arch\n'
u'system,%s,new.fqdn.name,Under my desk,ia64' % self.system.id).encode('utf8'))
with session.begin():
session.refresh(self.system)
self.assertGreater(self.system.date_modified, orig_date_modified)
self.assertEquals(self.system.fqdn, 'new.fqdn.name')
def test_grants_view_permission_to_everybody_by_default(self):
fqdn = data_setup.unique_name(u'test-csv-import%s.example.invalid')
b = self.browser
login(b)
self.import_csv((u'csv_type,fqdn\n'
u'system,%s' % fqdn).encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
'No Errors')
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
self.assertTrue(system.custom_access_policy.grants_everybody(
SystemPermission.view))
def test_system_secret_field(self):
login(self.browser)
self.import_csv((u'csv_type,fqdn,secret\n'
u'system,%s,True' % self.system.fqdn)
.encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
'No Errors')
with session.begin():
session.refresh(self.system.custom_access_policy)
self.assertFalse(self.system.custom_access_policy.grants_everybody(
SystemPermission.view))
self.import_csv((u'csv_type,fqdn,secret\n'
u'system,%s,False' % self.system.fqdn)
.encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
'No Errors')
with session.begin():
session.refresh(self.system.custom_access_policy)
self.assertTrue(self.system.custom_access_policy.grants_everybody(
SystemPermission.view))
def test_keyvalue(self):
login(self.browser)
orig_date_modified = self.system.date_modified
self.import_csv((u'csv_type,fqdn,key,key_value,deleted\n'
u'keyvalue,%s,COMMENT,UTF 8 –,False' % self.system.fqdn)
.encode('utf8'))
self.failUnless(is_text_present(self.browser, "No Errors"))
with session.begin():
session.refresh(self.system)
assert_has_key_with_value(self.system, 'COMMENT', u'UTF 8 –')
self.assert_(self.system.date_modified > orig_date_modified)
#https://bugzilla.redhat.com/show_bug.cgi?id=1058549
def test_keyvalue_non_existent_system_valid(self):
login(self.browser)
fqdn = data_setup.unique_name('system%s.idonot.exist')
self.import_csv((u'csv_type,fqdn,key,key_value,deleted\n'
u'keyvalue,%s,COMMENT,acomment,False' % fqdn)
.encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
"No Errors")
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
assert_has_key_with_value(system, 'COMMENT', u'acomment')
#https://bugzilla.redhat.com/show_bug.cgi?id=1058549
def test_keyvalue_non_existent_system_valid_invalid(self):
login(self.browser)
fqdn = data_setup.unique_name('system%s.idonot.exist')
self.import_csv((u'csv_type,fqdn,key,key_value,deleted\n'
u'keyvalue,%s,COMMENT,acomment,False\n'
u'keyvalue,%s,COMMENT,acomment,False' % (fqdn, '--'+fqdn))
.encode('utf8'))
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
"Error importing line 3: "
"Invalid FQDN for system: --%s" % fqdn)
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
assert_has_key_with_value(system, 'COMMENT', u'acomment')
#https://bugzilla.redhat.com/show_bug.cgi?id=1058549
def test_labinfo_non_existent_system(self):
login(self.browser)
fqdn = data_setup.unique_name('system%s.idonot.exist')
self.import_csv((u'csv_type,fqdn,orig_cost,curr_cost,dimensions,weight,wattage,cooling\n'
u'labinfo,%s,10000,10000,3000,4000.0,5001.0,6000.0' % fqdn)
.encode('utf8'))
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
self.assertEqual(system.labinfo.orig_cost, Decimal('10000'))
self.assertEqual(system.labinfo.curr_cost, Decimal('10000'))
self.assertEqual(system.labinfo.dimensions, u'3000')
self.assertEqual(system.labinfo.weight, 4000.0)
self.assertEqual(system.labinfo.wattage, 5001.0)
self.assertEqual(system.labinfo.cooling, 6000.0)
#https://bugzilla.redhat.com/show_bug.cgi?id=1058549
def test_power_non_existent_system(self):
login(self.browser)
fqdn = data_setup.unique_name('system%s.idonot.exist')
self.import_csv((u'csv_type,fqdn,power_address,power_user,power_password,power_id,power_type\n'
u'power,%s,qemu+tcp://%s,admin,admin,%s,virsh' % ((fqdn, )*3))
.encode('utf8'))
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
self.assertEqual(system.power.power_id, fqdn)
self.assertEqual(system.power.power_user, 'admin')
self.assertEqual(system.power.power_address, 'qemu+tcp://' + fqdn)
#https://bugzilla.redhat.com/show_bug.cgi?id=1058549
def test_excluded_family_non_existent_system(self):
login(self.browser)
fqdn = data_setup.unique_name('system%s.idonot.exist')
with session.begin():
osmajor = OSMajor.lazy_create(osmajor=u'MyEnterpriseLinux')
self.import_csv((u'csv_type,fqdn,arch,family,update,excluded\n'
u'exclude,%s,x86_64,MyEnterpriseLinux,,True' %
fqdn)
.encode('utf8'))
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
self.assertEquals(system.excluded_osmajor[0].osmajor_id,
osmajor.id)
#https://bugzilla.redhat.com/show_bug.cgi?id=1058549
def test_install_options_non_existent_system(self):
login(self.browser)
fqdn = data_setup.unique_name('system%s.idonot.exist')
with session.begin():
distro_tree = data_setup.create_distro_tree(osmajor='MyEnterpriseLinux',
arch=u'x86_64')
self.import_csv((u'csv_type,fqdn,arch,family,update,ks_meta,kernel_options,kernel_options_post\n'
u'install,%s,x86_64,MyEnterpriseLinux,,mode=cmdline,,console=ttyS0' %
fqdn)
.encode('utf8'))
with session.begin():
system = System.query.filter(System.fqdn == fqdn).one()
arch = Arch.by_name(u'x86_64')
osmajor = OSMajor.by_name(u'MyEnterpriseLinux')
p = system.provisions[arch].provision_families[osmajor]
self.assertEquals(p.ks_meta, u'mode=cmdline')
self.assertEquals(p.kernel_options_post, u'console=ttyS0')
# https://bugzilla.redhat.com/show_bug.cgi?id=787519
def test_no_quotes(self):
with session.begin():
data_setup.create_labcontroller(fqdn=u'imhoff.bkr')
b = self.browser
login(b)
b.get(get_server_base() + 'csv/csv_import')
b.find_element_by_name('csv_file').send_keys(
pkg_resources.resource_filename(self.__module__, 'bz787519.csv'))
b.find_element_by_name('csv_file').submit()
self.failUnless(is_text_present(self.browser, "No Errors"))
# https://bugzilla.redhat.com/show_bug.cgi?id=802842
def test_doubled_quotes(self):
with session.begin():
system = data_setup.create_system(fqdn=u'mymainframe.funtimes.invalid', arch=u's390x')
OSMajor.lazy_create(osmajor=u'RedHatEnterpriseLinux7')
b = self.browser
login(b)
b.get(get_server_base() + 'csv/csv_import')
b.find_element_by_name('csv_file').send_keys(
pkg_resources.resource_filename(self.__module__, 'bz802842.csv'))
b.find_element_by_name('csv_file').submit()
self.failUnless(is_text_present(self.browser, "No Errors"))
with session.begin():
session.refresh(system)
self.assertEquals(system.provisions[Arch.by_name(u's390x')]\
.provision_families[OSMajor.by_name(u'RedHatEnterpriseLinux7')]\
.kernel_options,
'rd.znet="qeth,0.0.8000,0.0.8001,0.0.8002,layer2=1,portname=lol,portno=0" '
'ip=1.2.3.4::1.2.3.4:255.255.248.0::eth0:none MTU=1500 nameserver=1.2.3.4 '
'DASD=20A1,21A1,22A1,23A1 MACADDR=02:DE:AD:BE:EF:16 '
'!LAYER2 !DNS !PORTNO !IPADDR !GATEWAY !HOSTNAME !NETMASK ')
def test_missing_field(self):
login(self.browser)
orig_date_modified = self.system.date_modified
self.import_csv((u'csv_type,fqdn,location,arch\n'
u'system,%s,Under my desk' % self.system.fqdn)
.encode('utf8'))
self.assert_(is_text_present(self.browser, 'Missing fields on line 2: arch'))
def test_extraneous_field(self):
login(self.browser)
orig_date_modified = self.system.date_modified
self.import_csv((u'csv_type,fqdn,location,arch\n'
u'system,%s,Under my desk,ppc64,what is this field doing here' % self.system.fqdn)
.encode('utf8'))
self.assert_(is_text_present(self.browser, 'Too many fields on line 2 (expecting 4)'))
# https://bugzilla.redhat.com/show_bug.cgi?id=972411
def test_malformed(self):
login(self.browser)
self.import_csv('gar\x00bage')
self.assertEquals(self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text,
'Error parsing CSV file: line contains NULL byte')
# https://bugzilla.redhat.com/show_bug.cgi?id=1085047
def test_rolls_back_on_error(self):
# The bug was that a row contained invalid data, which meant it was
# being discarded, but changes to system_status_duration were
# nevertheless being committed.
# To reproduce, we upload a CSV which changes 'status' successfully
# (thereby causing a row to be added to system_status_duration) but
# then errors out on 'secret' which does not accept empty string.
with session.begin():
self.assertEquals(len(self.system.status_durations), 1)
self.assertEquals(self.system.status_durations[0].status,
SystemStatus.automated)
self.assertEquals(self.system.status_durations[0].finish_time, None)
login(self.browser)
self.import_csv((u'csv_type,id,status,secret\n'
u'system,%s,Manual,\n' % self.system.id).encode('utf8'))
import_log = self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text
self.assertIn('Invalid secret None', import_log)
with session.begin():
session.expire_all()
self.assertEquals(self.system.status, SystemStatus.automated)
self.assertEquals(len(self.system.status_durations), 1)
self.assertEquals(self.system.status_durations[0].finish_time, None)
#https://bugzilla.redhat.com/show_bug.cgi?id=1085238
def test_error_on_empty_csv(self):
login(self.browser)
self.import_csv((u'csv_type,fqdn,location,arch\n').encode('utf8'))
import_log = self.browser.find_element_by_xpath(
'//table[@id="csv-import-log"]//td').text
self.assertIn('Empty CSV file supplied', import_log)
def test_system_unicode(self):
login(self.browser)
self.import_csv((u'csv_type,fqdn,location,arch\n'
u'system,%s,在我的办公桌,ia64' % self.system.fqdn) \
.encode('utf8'))
self.failUnless(is_text_present(self.browser, "No Errors"))
with session.begin():
session.refresh(self.system)
self.assertEquals(self.system.location, u'在我的办公桌')
def test_system_pools_import(self):
with session.begin():
system = data_setup.create_system()
pool1 = data_setup.create_system_pool()
pool2 = data_setup.create_system_pool()
login(self.browser)
self.import_csv((u'csv_type,fqdn,pool,deleted\n'
u'system_pool,%s,%s,False\n'
u'system_pool,%s,%s,False'%(system.fqdn, pool1.name,
system.fqdn, pool2.name)) \
.encode('utf8'))
self.failUnless(is_text_present(self.browser, 'No Errors'))
with session.begin():
session.refresh(system)
self.assertEquals([pool1.name, pool2.name],
[pool.name for pool in system.pools])
# test deletion
self.import_csv((u'csv_type,fqdn,pool,deleted\n'
u'system_pool,%s,%s,True' % (system.fqdn, pool2.name)) \
.encode('utf8'))
self.failUnless(is_text_present(self.browser, 'No Errors'))
with session.begin():
session.refresh(system)
self.assertNotIn(pool2.name, [pool.name for pool in system.pools])
# Attempting to add a system to a Non existent pool should throw an error
self.import_csv((u'csv_type,fqdn,pool,deleted\n'
u'system_pool,%s,poolpool,True' % system.fqdn) \
.encode('utf8'))
self.assertTrue(is_text_present(self.browser, 'poolpool: pool does not exist'))
| gpl-2.0 | 1,782,505,370,884,736,500 | 48.187179 | 116 | 0.593807 | false | 3.652513 | true | false | false |
university-gender-evolution/py-university-gender-dynamics-pkg | pyugend/abcComparisonPlot.py | 1 | 2906 | #!/usr/bin/python
"""
Abstract Base Class for building plots
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright '
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = ''
__maintainer__ = 'krishna bhogaonker'
__email__ = '[email protected]'
__status__ = ''
import abc
from bokeh.plotting import figure, output_file, show
from .PlotSettingsOverall import PlotSettingsOverall
height = 800
width = 800
class abcComparisonPlot(metaclass=abc.ABCMeta):
def __init__(self, model_results, settings=None):
self.plot = None
self.settings = settings
self.comparison = model_results
self.coordinates = {}
@abc.abstractmethod
def helper_overall_data(self):
pass
@abc.abstractmethod
def helper_level_data(self):
pass
@abc.abstractmethod
def helper_overall_empirical_upper_bound(self):
pass
@abc.abstractmethod
def helper_overall_empirical_lower_bound(self):
pass
@abc.abstractmethod
def helper_ground_truth_mgmt(self):
pass
@abc.abstractmethod
def helper_build_overall_plot_coordinates(self):
pass
@abc.abstractmethod
def execute_plot(self):
pass
@abc.abstractmethod
def helper_build_settings(self):
pass
def helper_duration(self):
xval = list(range(min([m.duration for m in self.comparison])))[self.settings['year_offset']:]
return xval
def helper_original_data_mgmt(self, field):
dval = self.comparison[0].mgmt_data.get_field(field)
return dval
def helper_indicate_number_of_models(self):
self.coordinates['number_of_models'] = len(self.comparison)
def helper_year_duration(self):
self.coordinates['xval'] = self.helper_duration()
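# A concrete plot is expected to subclass abcComparisonPlot and fill in the
# abstract helpers; a rough, purely illustrative outline (class name and
# method bodies are assumptions):
#   class OverallComparisonPlot(abcComparisonPlot):
#       def helper_overall_data(self): ...
#       def helper_level_data(self): ...
#       def execute_plot(self):
#           self.helper_build_settings()
#           self.helper_build_overall_plot_coordinates()
#           return self.plot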
| mit | -5,602,088,571,637,038,000 | 29.589474 | 463 | 0.703716 | false | 3.997249 | false | false | false |
xcoder123/KodiLatviesiem | plugin.video.filmaslatviski/fof.py | 1 | 12491 | # -*- coding: utf-8 -*-
import network
import sys
import re
import xbmc
import xbmcgui
import xbmcplugin
import urlresolver
import CommonFunctions
import kodi_func
import os
import codecs
common = CommonFunctions
common.plugin = "Filmas-Latviski-1.0.0"
mySourceId = 4
mainURL = 'http://www.fof.lv'
#indexed search
def SearchRaw(searchStr):
result = []
if searchStr == False or len(searchStr) == 0: return result
moviesList = []
moviesList = LoadIndexedFile( kodi_func.home + "/resources/fof_lv_movieIndexes.txt" )
print moviesList
for movie in moviesList:
if searchStr in movie['searchable_title']:
result.append({
'title': movie['title'].replace('<img src="http://fof.lv/lat-sub-icon.png" style="position: relative; left: 10px; top: 2px;">', '').encode('utf-8'),
'url': movie['url'],
'thumb': movie['thumb'],
'state': 'state_play',
'source_id': mySourceId
})
return result
#Search via fof.lv's own search page; the site's built-in search is unreliable and rarely returns useful results
def SearchRaw_old(searchStr):
result = []
html = network.getHTML( "http://www.fof.lv/search/?q=" + searchStr)
allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" })
if len(allEntries) == 0:
allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" })
# print allEntries
infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" })
moviesURLs = common.parseDOM(infoTd, "a", ret = "href")
moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style")
if len(moviesThumbnailURLsList) == 0:
moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src")
moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} )
# moviesYearList = common.parseDOM(infoTd, "div", attrs = {"style": "width: 100px; height: 18px; background: url(http://www.fom.ucoz.lv/jauns_img/entry_year.png) no-repeat; margin: 0px auto; padding-top: 2px;"} )
print allEntries, infoTd, moviesURLs, moviesThumbnailURLsList, moviesTitleList
# moviesTitleList = common.parseDOM(moviesList, "h2")
# moviesThumbnailURLsList = common.parseDOM(moviesList, "img", attrs = { "class": "img-thumbnail" }, ret = "src")
# moviesURLs = common.parseDOM(moviesList, "a", ret = "href")
# print moviesThumbnailURLsList
for i in range(0, len(moviesURLs)):
thumb = moviesThumbnailURLsList[i].replace("); width: 80px; height: 100px;", "").replace("background:url(", "").replace("/s","/")
if network.exists( mainURL+thumb ) == False:
thumb = thumb.replace(".jpg", ".png")
# title = re.sub(r'<br>[\w <>="-:\d;#&\\\\]*', '', moviesTitleList[i])
title = moviesTitleList[i].partition("<br>")[0].replace("<b>","").replace("</b>", "")
if not moviesURLs[i].startswith("http://"):
movieURL = mainURL + moviesURLs[i]
else:
movieURL = moviesURLs[i]
result.append({
'title':title.encode('utf-8'),
'url': movieURL,
'thumb': mainURL+thumb,
'source_id': mySourceId
})
return result
def Search(searchStr = None):
if searchStr == None:
text = kodi_func.showkeyboard('', u'Meklēt filmu')
else:
text = searchStr
print "Search string: " + str(text)
results = SearchRaw(text)
for r in results:
kodi_func.addDir(r['title'], r['url'], 'state_play', r['thumb'], source_id=r['source_id'])
def HomeNavigation():
if not os.path.isfile( kodi_func.home + "/resources/fof_lv_movieIndexes.txt" ):
IndexMovies( 'http://www.fof.lv/?page', 'fof_lv_movieIndexes.txt' )
print "Opening fof.lv"
url = mainURL
html = network.getHTML(url)
# print 'html: ' + html
nav_links_list = common.parseDOM(html, "div", attrs = { "class": "categories" })
nav_links = common.parseDOM(nav_links_list, "a", ret = "href")
nav_links_name = common.parseDOM(nav_links_list, "a")
kodi_func.addDir('Meklēt', '', 'state_search', '%s/meklet2.png'% kodi_func.iconpath, source_id=mySourceId)
kodi_func.addDir('Jaunākās Filmas', 'http://www.fof.lv/?page1', 'state_movies', kodi_func.GetCategoryImage('jaunakas'), source_id=mySourceId)
kodi_func.addDir('Populārākās', 'http://www.fof.lv/index/popularakas_filmas/0-13', 'state_movies', kodi_func.GetCategoryImage('skatitakas'), source_id=mySourceId)
kodi_func.addDir('Vērtētākās', 'http://www.fof.lv/index/vertetakas_filmas/0-16', 'state_movies', kodi_func.GetCategoryImage('vertetakas'), source_id=mySourceId)
# pagirasList = u'https://openload.co/embed/dLuET3ML86E/Deadpool.%28Dedpuls%29.2016.720p.LAT.THEVIDEO.LV.mkv.mp4'
# link = urlresolver.resolve(pagirasList)
# addDir('Dedpūls', pagirasList, 'state_play', None)
# addLink("Dedpūls", link.encode('utf-8'), None)
# print nav_links
# print nav_links_name
for i in range(0, len(nav_links)):
if kodi_func.isLinkUseful(nav_links[i]):
# print mainURL + nav_links[i]
kodi_func.addDir(nav_links_name[i].encode('utf-8'), nav_links[i], 'state_movies', kodi_func.GetCategoryImage(nav_links_name[i]), source_id=mySourceId)
def Movies(url, page=1):
print "url: " + url
if '?page1' in url:
html = network.getHTML(mainURL+"/?page"+str(page))
else:
html = network.getHTML(url+"-"+str(page))
# html = network.getHTML(url)
# print "html " + html
allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" })
if len(allEntries) == 0:
allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" })
# print allEntries
infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" })
moviesURLs = common.parseDOM(infoTd, "a", ret = "href")
moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style")
if len(moviesThumbnailURLsList) == 0:
moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src")
moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} )
# moviesYearList = common.parseDOM(infoTd, "div", attrs = {"style": "width: 100px; height: 18px; background: url(http://www.fom.ucoz.lv/jauns_img/entry_year.png) no-repeat; margin: 0px auto; padding-top: 2px;"} )
print allEntries, infoTd, moviesURLs, moviesThumbnailURLsList, moviesTitleList
# moviesTitleList = common.parseDOM(moviesList, "h2")
# moviesThumbnailURLsList = common.parseDOM(moviesList, "img", attrs = { "class": "img-thumbnail" }, ret = "src")
# moviesURLs = common.parseDOM(moviesList, "a", ret = "href")
# print moviesThumbnailURLsList
for i in range(0, len(moviesURLs)):
thumb = moviesThumbnailURLsList[i].replace("); width: 80px; height: 100px;", "").replace("background:url(", "").replace("/s","/")
if network.exists( mainURL+thumb ) == False:
thumb = thumb.replace(".jpg", ".png")
# title = re.sub(r'<br>[\w <>="-:\d;#&\\\\]*', '', moviesTitleList[i])
title = moviesTitleList[i].partition("<br>")[0].replace('<img src="http://fof.lv/lat-sub-icon.png" style="position: relative; left: 10px; top: 2px;">', '')
if not moviesURLs[i].startswith("http://"):
movieURL = mainURL + moviesURLs[i]
else:
movieURL = moviesURLs[i]
kodi_func.addDir(title.encode('utf-8'), movieURL, 'state_play', mainURL+thumb, source_id=mySourceId)
if len(moviesURLs) >= 10 and url != 'http://www.fof.lv/index/popularakas_filmas/0-13' and url != 'http://www.fof.lv/index/vertetakas_filmas/0-16':
kodi_func.addDir("Nākamā Lapa >>", url , 'state_movies', '%s/next.png'% kodi_func.iconpath, str(int(page) + 1), source_id=mySourceId)
def PlayMovie(url, title, picture):
print "url: " + url
html = network.getHTML(url)
# print "html: " + html
mainMovieCol = common.parseDOM(html, "div", attrs = { "id": "movie"} )
print mainMovieCol
video = common.parseDOM(mainMovieCol, "iframe", ret="src")[0]
try:
link = urlresolver.resolve(video)
if link != False:
kodi_func.addLink(title.decode('utf-8').encode('utf-8') + " - Latviski", link.encode('utf-8'), picture)
elif kodi_func.isVideoFormat(video.split(".")[-1]):
kodi_func.addLink(title.decode('utf-8').encode('utf-8') + " - Latviski", video, picture)
print link
except:
xbmcgui.Dialog().ok("Opā!", "Nevarēju dekodēt strīmu", "Iespējams ka fails vairs neeksistē", "Tāda dzīve, mēģini citi avotu")
# This website doesn't have a proper search function, so we must first index it
# These functions are unique to this source
def LoadIndexedFile(file):
f = codecs.open(file, "r", "utf-8")
content = f.read()
movies = content.split("\n")
result = []
for movie in movies:
params = movie.split("|")
if len(params) == 3:
result.append({
'title': params[0],
'url': params[1].decode('utf-8'),
'thumb': params[2].decode('utf-8'),
'searchable_title': kodi_func.MakeSearchableString(params[0])
})
else:
print "Something wrong with this movie:", movie
return result
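# Each line of the index file follows the "title|url|thumb" layout that
# IndexMovies() below writes out, e.g. (values purely illustrative):
#   Kada Filma|http://www.fof.lv/load/filmas/kada-filma/1-1-0-123|http://www.fof.lv/poster123.jpg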
def IndexMovies( baseUrl, fileName ):
progress_dialog = xbmcgui.DialogProgress()
progress_dialog.create("Indeksējam fof.lv")
currentPage = 1
url = baseUrl + str(currentPage)
html = network.getHTML(url)
allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" })
if len(allEntries) == 0:
allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" })
# print allEntries
infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" })
moviesURLs = common.parseDOM(infoTd, "a", ret = "href")
moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style")
if len(moviesThumbnailURLsList) == 0:
moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src")
moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} )
# moviesYearList = common.parseDOM(infoTd, "div", attrs = {"style": "width: 100px; height: 18px; background: url(http://www.fom.ucoz.lv/jauns_img/entry_year.png) no-repeat; margin: 0px auto; padding-top: 2px;"} )
print allEntries, infoTd, moviesURLs, moviesThumbnailURLsList, moviesTitleList
indexed = 0
# movieEntriesList = common.parseDOM( html, "ul", attrs = { "id": "uEntriesList" })
# screenList = common.parseDOM( movieEntriesList, "div", attrs = {"class": "ve-screen"})
# movieUrls = common.parseDOM(screenList, "a", ret = "href")
# print movieUrls, len(movieUrls)
movieURLIndex = 0
localFile = kodi_func.home + "/resources/"+fileName # xbmc.translatePath('special://temp/'+fileName )
temp = codecs.open( localFile, 'w', "utf-8")
movieIndexes = []
movieEntries = 370
for indexed in range(0, int(movieEntries)):
if movieURLIndex == len(moviesURLs): break
progress = int(float((float(indexed)/int(movieEntries))*100))
# print "Progress: " + str(progress)
progress_dialog.update( progress , "Lūdzu uzgaidi...", "Indeksējam fof.lv Filmas ", "Atlicis: " + str(int(movieEntries) - indexed) )
if (progress_dialog.iscanceled()): return
thumb = moviesThumbnailURLsList[movieURLIndex].replace("); width: 80px; height: 100px;", "").replace("background:url(", "").replace("/s","/")
print "thumb: " + thumb
if network.exists( mainURL+thumb ) == False:
thumb = thumb.replace(".jpg", ".png")
# title = re.sub(r'<br>[\w <>="-:\d;#&\\\\]*', '', moviesTitleList[i])
title = moviesTitleList[movieURLIndex].partition("<br>")[0]
if not moviesURLs[movieURLIndex].startswith("http://"):
movieURL = mainURL + moviesURLs[movieURLIndex]
else:
            movieURL = moviesURLs[movieURLIndex]
print title.encode('utf-8')
temp.write( title +"|" +movieURL +"|" +mainURL+thumb +"\n" )
movieIndexes.append( {'title': title, 'url': movieURL, 'thumb': mainURL+thumb} )
movieURLIndex += 1
if len(moviesURLs) == movieURLIndex:
currentPage += 1
html = network.getHTML(baseUrl+str(currentPage))
allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" })
if len(allEntries) == 0:
allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" })
# print allEntries
infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" })
moviesURLs = common.parseDOM(infoTd, "a", ret = "href")
moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style")
if len(moviesThumbnailURLsList) == 0:
moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src")
moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} )
# print movieUrls, len(movieUrls)
movieURLIndex = 0
temp.close()
return movieIndexes
| gpl-3.0 | 3,629,454,792,757,261,300 | 40.682274 | 213 | 0.67223 | false | 2.782541 | false | false | false |
shifter/rekall | rekall-core/rekall/plugins/windows/dumpcerts.py | 4 | 6522 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Hale Ligh <[email protected]>
# Michael Cohen <[email protected]>
#
# Contributors/References:
# ## Based on sslkeyfinder: http://www.trapkit.de/research/sslkeyfinder/
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
try:
from M2Crypto import X509, RSA
except ImportError:
X509 = RSA = None
from rekall import plugin
from rekall import scan
from rekall import testlib
from rekall import utils
from rekall.plugins import core
from rekall.plugins.windows import common
from rekall.plugins.windows import vadinfo
from rekall.plugins.overlays import basic
class CertScanner(scan.BaseScanner):
"""A scanner for certificate ASN.1 objects.
Yara rules for the two ASN.1 encoded objects we are looking for:
'x509' : 'rule x509 {
strings: $a = {30 82 ?? ?? 30 82 ?? ??} condition: $a
}',
'pkcs' : 'rule pkcs {
strings: $a = {30 82 ?? ?? 02 01 00} condition: $a
}',
    These rules are very simple, so we don't really use Yara for this - it's
    faster to just scan directly.
"""
checks = [
('StringCheck', dict(needle="\x30\x82"))
]
def scan(self, offset=0, maxlen=None):
for hit in super(CertScanner, self).scan(offset=offset, maxlen=maxlen):
signature = self.address_space.read(hit + 4, 3)
size = self.profile.Object(
"unsigned be short", offset=hit+2, vm=self.address_space)
description = None
if signature.startswith("\x30\x82"):
data = self.address_space.read(hit, size + 4)
if X509:
try:
cert = X509.load_cert_der_string(data)
description = utils.SmartStr(cert.get_subject())
except X509.X509Error:
pass
yield hit, "X509", data, description
elif signature.startswith("\x02\x01\x00"):
data = self.address_space.read(hit, size + 4)
if RSA:
try:
pem = ("-----BEGIN RSA PRIVATE KEY-----\n" +
data.encode("base64") +
"-----END RSA PRIVATE KEY-----")
key = RSA.load_key_string(pem)
description = "Verified: %s" % key.check_key()
except Exception:
pass
yield hit, "RSA", data, description
class CertScan(core.DirectoryDumperMixin, plugin.PhysicalASMixin,
plugin.Command):
"""Dump RSA private and public SSL keys from the physical address space."""
__name = "certscan"
# We can just display the certs instead of dumping them.
dump_dir_optional = True
default_dump_dir = None
def render(self, renderer):
headers = [("Address", "address", "[addrpad]"),
("Type", "type", "10"),
("Length", "length", "10")]
if self.dump_dir:
headers.append(("Filename", "filename", "20"))
headers.append(("Description", "description", ""))
renderer.table_header(headers)
scanner = CertScanner(
address_space=self.physical_address_space,
session=self.session,
profile=basic.Profile32Bits(session=self.session))
for hit, type, data, description in scanner.scan():
args = [hit, type, len(data)]
if self.dump_dir:
filename = "%s.%08X.der" % (type, hit)
with renderer.open(directory=self.dump_dir,
filename=filename,
mode="wb") as fd:
fd.write(data)
args.append(filename)
args.append(description)
renderer.table_row(*args)
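# From the Rekall command line this plugin can be invoked roughly as follows
# (mirroring the test command line in TestCertScan below; the image name is
# illustrative):
#   rekal -f memory.img certscan -D /tmp/certs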
class TestCertScan(testlib.HashChecker):
PARAMETERS = dict(
commandline="certscan -D %(tempdir)s",
)
class VadCertScanner(CertScanner, vadinfo.VadScanner):
"""Scanner for certs in vads."""
class CertVadScan(core.DirectoryDumperMixin, common.WinProcessFilter):
"""Scan certificates in process Vads."""
__name = "cert_vad_scan"
# We can just display the certs instead of dumping them.
dump_dir_optional = True
default_dump_dir = None
def render(self, renderer):
headers = [
("Pid", "pid", "5"),
("Command", "command", "10"),
("Address", "address", "[addrpad]"),
("Type", "type", "5"),
("Length", "length", "5")]
if self.dump_dir:
headers.append(("Filename", "filename", "20"))
headers.append(("Description", "description", ""))
renderer.table_header(headers)
for task in self.filter_processes():
scanner = VadCertScanner(task=task)
for hit, type, data, description in scanner.scan():
args = [task.UniqueProcessId, task.ImageFileName,
hit, type, len(data)]
if self.dump_dir:
filename = "%s.%s.%08X.der" % (
task.UniqueProcessId, type, hit)
with renderer.open(directory=self.dump_dir,
filename=filename,
mode="wb") as fd:
fd.write(data)
args.append(filename)
args.append(description)
renderer.table_row(*args)
class TestCertVadScan(testlib.HashChecker):
PARAMETERS = dict(
commandline="cert_vad_scan --proc_regex %(regex)s -D %(tempdir)s ",
regex="csrss.exe"
)
| gpl-2.0 | -8,258,718,768,978,003,000 | 31.61 | 79 | 0.565011 | false | 4.183451 | false | false | false |
giliam/sharbrary | discussion/migrations/0001_initial.py | 1 | 1166 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Discussion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name='title')),
('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the library')),
('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')),
('status', models.CharField(default=b'OP', max_length=200, verbose_name='status of the topic', choices=[(b'OP', b'OPEN'), (b'CL', b'CLOSED'), (b'DE', b'DELETED'), (b'AR', b'ARCHIVED')])),
('author', models.ForeignKey(verbose_name='author', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
),
]
| gpl-2.0 | 5,704,729,064,769,348,000 | 43.846154 | 203 | 0.614923 | false | 3.993151 | false | false | false |
ambitioninc/newrelic-api | newrelic_api/servers.py | 1 | 10813 | from .base import Resource
class Servers(Resource):
"""
An interface for interacting with the NewRelic server API.
"""
def list(self, filter_name=None, filter_ids=None, filter_labels=None, page=None):
"""
This API endpoint returns a paginated list of the Servers
associated with your New Relic account. Servers can be filtered
by their name or by a list of server IDs.
:type filter_name: str
:param filter_name: Filter by server name
:type filter_ids: list of ints
:param filter_ids: Filter by server ids
:type filter_labels: dict of label type: value pairs
:param filter_labels: Filter by server labels
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"servers": [
{
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers.json?page=2",
"rel": "next"
}
}
}
"""
label_param = ''
if filter_labels:
label_param = ';'.join(['{}:{}'.format(label, value) for label, value in filter_labels.items()])
filters = [
'filter[name]={0}'.format(filter_name) if filter_name else None,
'filter[ids]={0}'.format(','.join([str(app_id) for app_id in filter_ids])) if filter_ids else None,
'filter[labels]={0}'.format(label_param) if filter_labels else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}servers.json'.format(self.URL),
headers=self.headers,
params=self.build_param_string(filters)
)
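    # Illustrative call (API key and label values are assumptions):
    #   Servers(api_key='...').list(
    #       filter_labels={'Environment': 'Production'}, page=1)
    # which builds the filter string
    #   filter[labels]=Environment:Production&page=1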
def show(self, id):
"""
This API endpoint returns a single Server, identified its ID.
:type id: int
:param id: Server ID
:rtype: dict
:return: The JSON response of the API
::
{
"server": {
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
}
"""
return self._get(
url='{0}servers/{1}.json'.format(self.URL, id),
headers=self.headers,
)
def update(self, id, name=None):
"""
Updates any of the optional parameters of the server
:type id: int
:param id: Server ID
:type name: str
:param name: The name of the server
:rtype: dict
:return: The JSON response of the API
::
{
"server": {
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
}
"""
nr_data = self.show(id)['server']
data = {
'server': {
'name': name or nr_data['name'],
}
}
return self._put(
url='{0}servers/{1}.json'.format(self.URL, id),
headers=self.headers,
data=data
)
def delete(self, id):
"""
This API endpoint deletes an server and all of its reported data.
WARNING: Only servers that have stopped reporting can be deleted.
This is an irreversible process which will delete all reported
data for this server.
:type id: int
:param id: Server ID
:rtype: dict
:return: The JSON response of the API
::
{
"server": {
"id": "integer",
"account_id": "integer",
"name": "string",
"host": "string",
"reporting": "boolean",
"last_reported_at": "time",
"summary": {
"cpu": "float",
"cpu_stolen": "float",
"disk_io": "float",
"memory": "float",
"memory_used": "integer",
"memory_total": "integer",
"fullest_disk": "float",
"fullest_disk_free": "integer"
}
}
}
"""
return self._delete(
url='{0}servers/{1}.json'.format(
self.URL,
id),
headers=self.headers,
)
def metric_names(self, id, name=None, page=None):
"""
Return a list of known metrics and their value names for the given resource.
:type id: int
:param id: Server ID
:type name: str
:param name: Filter metrics by name
:type page: int
:param page: Pagination index
:rtype: dict
:return: The JSON response of the API, with an additional 'pages' key
if there are paginated results
::
{
"metrics": [
{
"name": "string",
"values": [
"string"
]
}
],
"pages": {
"last": {
"url": "https://api.newrelic.com/v2/servers/{server_id}/metrics.json?page=2",
"rel": "last"
},
"next": {
"url": "https://api.newrelic.com/v2/servers/{server_id}/metrics.json?page=2",
"rel": "next"
}
}
}
"""
params = [
'name={0}'.format(name) if name else None,
'page={0}'.format(page) if page else None
]
return self._get(
url='{0}servers/{1}/metrics.json'.format(self.URL, id),
headers=self.headers,
params=self.build_param_string(params)
)
def metric_data(
self, id, names, values=None, from_dt=None, to_dt=None,
summarize=False):
"""
This API endpoint returns a list of values for each of the requested
metrics. The list of available metrics can be returned using the Metric
Name API endpoint. Metric data can be filtered by a number of
parameters, including multiple names and values, and by time range.
Metric names and values will be matched intelligently in the
background. You can also retrieve a summarized data point across the
entire time range selected by using the summarize parameter.
**Note** All times sent and received are formatted in UTC. The default
time range is the last 30 minutes.
:type id: int
:param id: Server ID
:type names: list of str
:param names: Retrieve specific metrics by name
:type values: list of str
:param values: Retrieve specific metric values
:type from_dt: datetime
:param from_dt: Retrieve metrics after this time
:type to_dt: datetime
:param to_dt: Retrieve metrics before this time
:type summarize: bool
:param summarize: Summarize the data
:rtype: dict
:return: The JSON response of the API
::
{
"metric_data": {
"from": "time",
"to": "time",
"metrics": [
{
"name": "string",
"timeslices": [
{
"from": "time",
"to": "time",
"values": "hash"
}
]
}
]
}
}
"""
params = [
'from={0}'.format(from_dt) if from_dt else None,
'to={0}'.format(to_dt) if to_dt else None,
'summarize=true' if summarize else None
]
params += ['names[]={0}'.format(name) for name in names]
if values:
params += ['values[]={0}'.format(value) for value in values]
return self._get(
url='{0}servers/{1}/metrics/data.json'.format(self.URL, id),
headers=self.headers,
params=self.build_param_string(params)
)
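    # Illustrative call (the metric name/value strings are assumptions; real
    # names can be discovered first via metric_names()):
    #   servers.metric_data(id=1234, names=['System/CPU/System/percent'],
    #                       values=['average_value'], summarize=True)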
| mit | 4,022,822,439,138,506,000 | 30.524781 | 111 | 0.420235 | false | 4.831546 | false | false | false |
yongshengwang/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlwt/Workbook.py | 57 | 20514 | # -*- coding: windows-1252 -*-
'''
Record Order in BIFF8
Workbook Globals Substream
BOF Type = workbook globals
Interface Header
MMS
Interface End
WRITEACCESS
CODEPAGE
DSF
TABID
FNGROUPCOUNT
Workbook Protection Block
WINDOWPROTECT
PROTECT
PASSWORD
PROT4REV
PROT4REVPASS
BACKUP
HIDEOBJ
WINDOW1
DATEMODE
PRECISION
REFRESHALL
BOOKBOOL
FONT +
FORMAT *
XF +
STYLE +
? PALETTE
USESELFS
BOUNDSHEET +
COUNTRY
? Link Table
SST
ExtSST
EOF
'''
import BIFFRecords
import Style
class Workbook(object):
#################################################################
## Constructor
#################################################################
def __init__(self, encoding='ascii', style_compression=0):
self.encoding = encoding
self.__owner = 'None'
self.__country_code = None # 0x07 is Russia :-)
self.__wnd_protect = 0
self.__obj_protect = 0
self.__protect = 0
self.__backup_on_save = 0
# for WINDOW1 record
self.__hpos_twips = 0x01E0
self.__vpos_twips = 0x005A
self.__width_twips = 0x3FCF
self.__height_twips = 0x2A4E
self.__active_sheet = 0
self.__first_tab_index = 0
self.__selected_tabs = 0x01
self.__tab_width_twips = 0x0258
self.__wnd_hidden = 0
self.__wnd_mini = 0
self.__hscroll_visible = 1
self.__vscroll_visible = 1
self.__tabs_visible = 1
self.__styles = Style.StyleCollection(style_compression)
self.__dates_1904 = 0
self.__use_cell_values = 1
self.__sst = BIFFRecords.SharedStringTable(self.encoding)
self.__worksheets = []
self.__worksheet_idx_from_name = {}
self.__sheet_refs = {}
self._supbook_xref = {}
self._xcall_xref = {}
self._ownbook_supbookx = None
self._ownbook_supbook_ref = None
self._xcall_supbookx = None
self._xcall_supbook_ref = None
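    # Typical use of this class goes through the public xlwt API; add_sheet()
    # and save() are defined further down in this module, outside this excerpt:
    #   wb = Workbook(encoding='utf-8')
    #   ws = wb.add_sheet('Sheet 1')
    #   ws.write(0, 0, 'hello')
    #   wb.save('out.xls')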
#################################################################
## Properties, "getters", "setters"
#################################################################
def get_style_stats(self):
return self.__styles.stats[:]
def set_owner(self, value):
self.__owner = value
def get_owner(self):
return self.__owner
owner = property(get_owner, set_owner)
#################################################################
def set_country_code(self, value):
self.__country_code = value
def get_country_code(self):
return self.__country_code
country_code = property(get_country_code, set_country_code)
#################################################################
def set_wnd_protect(self, value):
self.__wnd_protect = int(value)
def get_wnd_protect(self):
return bool(self.__wnd_protect)
wnd_protect = property(get_wnd_protect, set_wnd_protect)
#################################################################
def set_obj_protect(self, value):
self.__obj_protect = int(value)
def get_obj_protect(self):
return bool(self.__obj_protect)
obj_protect = property(get_obj_protect, set_obj_protect)
#################################################################
def set_protect(self, value):
self.__protect = int(value)
def get_protect(self):
return bool(self.__protect)
protect = property(get_protect, set_protect)
#################################################################
def set_backup_on_save(self, value):
self.__backup_on_save = int(value)
def get_backup_on_save(self):
return bool(self.__backup_on_save)
backup_on_save = property(get_backup_on_save, set_backup_on_save)
#################################################################
def set_hpos(self, value):
self.__hpos_twips = value & 0xFFFF
def get_hpos(self):
return self.__hpos_twips
hpos = property(get_hpos, set_hpos)
#################################################################
def set_vpos(self, value):
self.__vpos_twips = value & 0xFFFF
def get_vpos(self):
return self.__vpos_twips
vpos = property(get_vpos, set_vpos)
#################################################################
def set_width(self, value):
self.__width_twips = value & 0xFFFF
def get_width(self):
return self.__width_twips
width = property(get_width, set_width)
#################################################################
def set_height(self, value):
self.__height_twips = value & 0xFFFF
def get_height(self):
return self.__height_twips
height = property(get_height, set_height)
#################################################################
def set_active_sheet(self, value):
self.__active_sheet = value & 0xFFFF
self.__first_tab_index = self.__active_sheet
def get_active_sheet(self):
return self.__active_sheet
active_sheet = property(get_active_sheet, set_active_sheet)
#################################################################
def set_tab_width(self, value):
self.__tab_width_twips = value & 0xFFFF
def get_tab_width(self):
return self.__tab_width_twips
tab_width = property(get_tab_width, set_tab_width)
#################################################################
def set_wnd_visible(self, value):
self.__wnd_hidden = int(not value)
def get_wnd_visible(self):
return not bool(self.__wnd_hidden)
wnd_visible = property(get_wnd_visible, set_wnd_visible)
#################################################################
def set_wnd_mini(self, value):
self.__wnd_mini = int(value)
def get_wnd_mini(self):
return bool(self.__wnd_mini)
wnd_mini = property(get_wnd_mini, set_wnd_mini)
#################################################################
def set_hscroll_visible(self, value):
self.__hscroll_visible = int(value)
def get_hscroll_visible(self):
return bool(self.__hscroll_visible)
hscroll_visible = property(get_hscroll_visible, set_hscroll_visible)
#################################################################
def set_vscroll_visible(self, value):
self.__vscroll_visible = int(value)
def get_vscroll_visible(self):
return bool(self.__vscroll_visible)
vscroll_visible = property(get_vscroll_visible, set_vscroll_visible)
#################################################################
def set_tabs_visible(self, value):
self.__tabs_visible = int(value)
def get_tabs_visible(self):
return bool(self.__tabs_visible)
tabs_visible = property(get_tabs_visible, set_tabs_visible)
#################################################################
def set_dates_1904(self, value):
self.__dates_1904 = int(value)
def get_dates_1904(self):
return bool(self.__dates_1904)
dates_1904 = property(get_dates_1904, set_dates_1904)
#################################################################
def set_use_cell_values(self, value):
self.__use_cell_values = int(value)
def get_use_cell_values(self):
return bool(self.__use_cell_values)
use_cell_values = property(get_use_cell_values, set_use_cell_values)
#################################################################
def get_default_style(self):
return self.__styles.default_style
default_style = property(get_default_style)
##################################################################
## Methods
##################################################################
def add_style(self, style):
return self.__styles.add(style)
def add_str(self, s):
return self.__sst.add_str(s)
def del_str(self, sst_idx):
self.__sst.del_str(sst_idx)
def str_index(self, s):
return self.__sst.str_index(s)
def add_sheet(self, sheetname, cell_overwrite_ok=False):
import Worksheet, Utils
if not isinstance(sheetname, unicode):
sheetname = sheetname.decode(self.encoding)
if not Utils.valid_sheet_name(sheetname):
raise Exception("invalid worksheet name %r" % sheetname)
lower_name = sheetname.lower()
if lower_name in self.__worksheet_idx_from_name:
raise Exception("duplicate worksheet name %r" % sheetname)
self.__worksheet_idx_from_name[lower_name] = len(self.__worksheets)
self.__worksheets.append(Worksheet.Worksheet(sheetname, self, cell_overwrite_ok))
return self.__worksheets[-1]
def get_sheet(self, sheetnum):
return self.__worksheets[sheetnum]
def raise_bad_sheetname(self, sheetname):
raise Exception("Formula: unknown sheet name %s" % sheetname)
def convert_sheetindex(self, strg_ref, n_sheets):
idx = int(strg_ref)
if 0 <= idx < n_sheets:
return idx
msg = "Formula: sheet index (%s) >= number of sheets (%d)" % (strg_ref, n_sheets)
raise Exception(msg)
def _get_supbook_index(self, tag):
if tag in self._supbook_xref:
return self._supbook_xref[tag]
self._supbook_xref[tag] = idx = len(self._supbook_xref)
return idx
def setup_ownbook(self):
self._ownbook_supbookx = self._get_supbook_index(('ownbook', 0))
self._ownbook_supbook_ref = None
reference = (self._ownbook_supbookx, 0xFFFE, 0xFFFE)
if reference in self.__sheet_refs:
raise Exception("can't happen")
self.__sheet_refs[reference] = self._ownbook_supbook_ref = len(self.__sheet_refs)
def setup_xcall(self):
self._xcall_supbookx = self._get_supbook_index(('xcall', 0))
self._xcall_supbook_ref = None
reference = (self._xcall_supbookx, 0xFFFE, 0xFFFE)
if reference in self.__sheet_refs:
raise Exception("can't happen")
self.__sheet_refs[reference] = self._xcall_supbook_ref = len(self.__sheet_refs)
def add_sheet_reference(self, formula):
patches = []
n_sheets = len(self.__worksheets)
sheet_refs, xcall_refs = formula.get_references()
for ref0, ref1, offset in sheet_refs:
if not ref0.isdigit():
try:
ref0n = self.__worksheet_idx_from_name[ref0.lower()]
except KeyError:
self.raise_bad_sheetname(ref0)
else:
ref0n = self.convert_sheetindex(ref0, n_sheets)
if ref1 == ref0:
ref1n = ref0n
elif not ref1.isdigit():
try:
ref1n = self.__worksheet_idx_from_name[ref1.lower()]
except KeyError:
self.raise_bad_sheetname(ref1)
else:
ref1n = self.convert_sheetindex(ref1, n_sheets)
if ref1n < ref0n:
msg = "Formula: sheets out of order; %r:%r -> (%d, %d)" \
% (ref0, ref1, ref0n, ref1n)
raise Exception(msg)
if self._ownbook_supbookx is None:
self.setup_ownbook()
reference = (self._ownbook_supbookx, ref0n, ref1n)
if reference in self.__sheet_refs:
patches.append((offset, self.__sheet_refs[reference]))
else:
nrefs = len(self.__sheet_refs)
if nrefs > 65535:
raise Exception('More than 65536 inter-sheet references')
self.__sheet_refs[reference] = nrefs
patches.append((offset, nrefs))
for funcname, offset in xcall_refs:
if self._ownbook_supbookx is None:
self.setup_ownbook()
if self._xcall_supbookx is None:
self.setup_xcall()
# print funcname, self._supbook_xref
patches.append((offset, self._xcall_supbook_ref))
if not isinstance(funcname, unicode):
funcname = funcname.decode(self.encoding)
if funcname in self._xcall_xref:
idx = self._xcall_xref[funcname]
else:
self._xcall_xref[funcname] = idx = len(self._xcall_xref)
patches.append((offset + 2, idx + 1))
formula.patch_references(patches)
##################################################################
## BIFF records generation
##################################################################
def __bof_rec(self):
return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.BOOK_GLOBAL).get()
def __eof_rec(self):
return BIFFRecords.EOFRecord().get()
def __intf_hdr_rec(self):
return BIFFRecords.InteraceHdrRecord().get()
def __intf_end_rec(self):
return BIFFRecords.InteraceEndRecord().get()
def __intf_mms_rec(self):
return BIFFRecords.MMSRecord().get()
def __write_access_rec(self):
return BIFFRecords.WriteAccessRecord(self.__owner).get()
def __wnd_protect_rec(self):
return BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()
def __obj_protect_rec(self):
return BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()
def __protect_rec(self):
return BIFFRecords.ProtectRecord(self.__protect).get()
def __password_rec(self):
return BIFFRecords.PasswordRecord().get()
def __prot4rev_rec(self):
return BIFFRecords.Prot4RevRecord().get()
def __prot4rev_pass_rec(self):
return BIFFRecords.Prot4RevPassRecord().get()
def __backup_rec(self):
return BIFFRecords.BackupRecord(self.__backup_on_save).get()
def __hide_obj_rec(self):
return BIFFRecords.HideObjRecord().get()
def __window1_rec(self):
flags = 0
flags |= (self.__wnd_hidden) << 0
flags |= (self.__wnd_mini) << 1
flags |= (self.__hscroll_visible) << 3
flags |= (self.__vscroll_visible) << 4
flags |= (self.__tabs_visible) << 5
return BIFFRecords.Window1Record(self.__hpos_twips, self.__vpos_twips,
self.__width_twips, self.__height_twips,
flags,
self.__active_sheet, self.__first_tab_index,
self.__selected_tabs, self.__tab_width_twips).get()
def __codepage_rec(self):
return BIFFRecords.CodepageBiff8Record().get()
def __country_rec(self):
if not self.__country_code:
return ''
return BIFFRecords.CountryRecord(self.__country_code, self.__country_code).get()
def __dsf_rec(self):
return BIFFRecords.DSFRecord().get()
def __tabid_rec(self):
return BIFFRecords.TabIDRecord(len(self.__worksheets)).get()
def __fngroupcount_rec(self):
return BIFFRecords.FnGroupCountRecord().get()
def __datemode_rec(self):
return BIFFRecords.DateModeRecord(self.__dates_1904).get()
def __precision_rec(self):
return BIFFRecords.PrecisionRecord(self.__use_cell_values).get()
def __refresh_all_rec(self):
return BIFFRecords.RefreshAllRecord().get()
def __bookbool_rec(self):
return BIFFRecords.BookBoolRecord().get()
def __all_fonts_num_formats_xf_styles_rec(self):
return self.__styles.get_biff_data()
def __palette_rec(self):
result = ''
return result
def __useselfs_rec(self):
return BIFFRecords.UseSelfsRecord().get()
def __boundsheets_rec(self, data_len_before, data_len_after, sheet_biff_lens):
# .................................
        # BOUNDSHEET0
        # BOUNDSHEET1
        # BOUNDSHEET2
# ..................................
# WORKSHEET0
# WORKSHEET1
# WORKSHEET2
boundsheets_len = 0
for sheet in self.__worksheets:
boundsheets_len += len(BIFFRecords.BoundSheetRecord(
0x00L, sheet.visibility, sheet.name, self.encoding
).get())
start = data_len_before + boundsheets_len + data_len_after
result = ''
for sheet_biff_len, sheet in zip(sheet_biff_lens, self.__worksheets):
result += BIFFRecords.BoundSheetRecord(
start, sheet.visibility, sheet.name, self.encoding
).get()
start += sheet_biff_len
return result
def __all_links_rec(self):
pieces = []
temp = [(idx, tag) for tag, idx in self._supbook_xref.items()]
temp.sort()
for idx, tag in temp:
stype, snum = tag
if stype == 'ownbook':
rec = BIFFRecords.InternalReferenceSupBookRecord(len(self.__worksheets)).get()
pieces.append(rec)
elif stype == 'xcall':
rec = BIFFRecords.XcallSupBookRecord().get()
pieces.append(rec)
temp = [(idx, name) for name, idx in self._xcall_xref.items()]
temp.sort()
for idx, name in temp:
rec = BIFFRecords.ExternnameRecord(
options=0, index=0, name=name, fmla='\x02\x00\x1c\x17').get()
pieces.append(rec)
else:
raise Exception('unknown supbook stype %r' % stype)
if len(self.__sheet_refs) > 0:
# get references in index order
temp = [(idx, ref) for ref, idx in self.__sheet_refs.items()]
temp.sort()
temp = [ref for idx, ref in temp]
externsheet_record = BIFFRecords.ExternSheetRecord(temp).get()
pieces.append(externsheet_record)
return ''.join(pieces)
def __sst_rec(self):
return self.__sst.get_biff_record()
def __ext_sst_rec(self, abs_stream_pos):
return ''
#return BIFFRecords.ExtSSTRecord(abs_stream_pos, self.sst_record.str_placement,
#self.sst_record.portions_len).get()
def get_biff_data(self):
before = ''
before += self.__bof_rec()
before += self.__intf_hdr_rec()
before += self.__intf_mms_rec()
before += self.__intf_end_rec()
before += self.__write_access_rec()
before += self.__codepage_rec()
before += self.__dsf_rec()
before += self.__tabid_rec()
before += self.__fngroupcount_rec()
before += self.__wnd_protect_rec()
before += self.__protect_rec()
before += self.__obj_protect_rec()
before += self.__password_rec()
before += self.__prot4rev_rec()
before += self.__prot4rev_pass_rec()
before += self.__backup_rec()
before += self.__hide_obj_rec()
before += self.__window1_rec()
before += self.__datemode_rec()
before += self.__precision_rec()
before += self.__refresh_all_rec()
before += self.__bookbool_rec()
before += self.__all_fonts_num_formats_xf_styles_rec()
before += self.__palette_rec()
before += self.__useselfs_rec()
country = self.__country_rec()
all_links = self.__all_links_rec()
shared_str_table = self.__sst_rec()
after = country + all_links + shared_str_table
ext_sst = self.__ext_sst_rec(0) # need fake cause we need calc stream pos
eof = self.__eof_rec()
self.__worksheets[self.__active_sheet].selected = True
sheets = ''
sheet_biff_lens = []
for sheet in self.__worksheets:
data = sheet.get_biff_data()
sheets += data
sheet_biff_lens.append(len(data))
bundlesheets = self.__boundsheets_rec(len(before), len(after)+len(ext_sst)+len(eof), sheet_biff_lens)
sst_stream_pos = len(before) + len(bundlesheets) + len(country) + len(all_links)
ext_sst = self.__ext_sst_rec(sst_stream_pos)
return before + bundlesheets + after + ext_sst + eof + sheets
def save(self, filename):
import CompoundDoc
doc = CompoundDoc.XlsDoc()
doc.save(filename, self.get_biff_data())
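if __name__ == '__main__':
    # Hedged usage sketch (added, not part of the original module): a typical
    # xlwt-style round trip through this Workbook class. The sheet name, the
    # cell write call (defined in Worksheet.py) and the output path are
    # illustrative only.
    book = Workbook(encoding='utf-8')
    sheet = book.add_sheet(u'Sheet1')
    sheet.write(0, 0, u'hello')
    book.save('example.xls')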
| apache-2.0 | 7,327,411,666,094,557,000 | 31.254717 | 109 | 0.522131 | false | 3.917128 | false | false | false |
Agicia/lpod-python | scripts/lpod-highlight.py | 1 | 4772 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009-2010 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <[email protected]>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option)
# any later version.
# Lpod is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Import from the standard library
from optparse import OptionParser
from sys import exit, stdin
# Import from lpod
from lpod import __version__
from lpod.document import odf_get_document
from lpod.scriptutils import add_option_output, printinfo
from lpod.style import odf_create_style
from lpod.styles import rgb2hex
def highlight(document, pattern, color=None, background_color=None,
italic=False, bold=False, target=None, pretty=True):
# Make display_name and name
display_name = [u"Highlight"]
if color and color != 'none':
display_name.append(unicode(color).capitalize())
if background_color and background_color != 'none':
display_name.append(unicode(background_color).capitalize())
if italic:
display_name.append(u"Italic")
if bold:
display_name.append(u"Bold")
display_name = u" ".join(display_name)
name = display_name.replace(u" ", u"_20_")
# Is our style already installed?
style = document.get_style('text', name)
if style is None:
color = rgb2hex(color) if color != 'none' else None
background_color = (rgb2hex(background_color)
if background_color != 'none' else None)
style = odf_create_style('text', name,
italic=italic, bold=bold, color=color,
background_color=background_color)
document.insert_style(style, automatic=True)
# Patch!
body = document.get_body()
i = -1
for i, paragraph in enumerate(body.get_paragraphs(content=pattern) +
body.get_headings(content=pattern)):
# Don't colour the table of content
if paragraph.get_parent().get_tag() in ('text:index-title',
'text:index-body'):
continue
paragraph.set_span(name, regex=pattern)
document.save(target=target, pretty=pretty)
printinfo((i + 1), "paragraphs changed (0 error, 0 warning).")
if __name__ == '__main__':
# Options initialisation
usage = '%prog <file> <pattern>'
description = ("highlight the text matching the given regular "
"expression (Python syntax). May not display in some "
"office suites.")
parser = OptionParser(usage, version=__version__,
description=description)
# --color
help = ("the name or #rrggbb color of the font color: black, blue, "
"brown, cyan, green, grey, magenta, orange, pink, red, violet, "
"white, yellow or none (default)")
parser.add_option('-c', '--color', default='none', metavar='COLOR',
help=help)
# --background
help = ("the name or #rrggbb color of the background color: black, "
"blue, brown, cyan, green, grey, magenta, orange, pink, red, "
"violet, white, yellow (default) or none")
parser.add_option('-g', '--background', default='yellow',
metavar='BACKGROUND', help=help)
# --italic
parser.add_option('-i', '--italic', dest='italic', action='store_true',
default=False, help='set the italic font style')
# --bold
parser.add_option('-b', '--bold', dest='bold', action='store_true',
default=False, help='set the bold font weight')
# --output
add_option_output(parser)
# Parse options
options, args = parser.parse_args()
if len(args) != 2:
parser.print_help()
exit(1)
odf_file_url, pattern = args
pattern = unicode(pattern, stdin.encoding)
document = odf_get_document(odf_file_url)
highlight(document, pattern, options.color, options.background,
options.italic, options.bold, target=options.output)
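# Hedged CLI sketch (added, not part of the original script): a typical
# invocation built from the options declared above; the file name and pattern
# are illustrative, and the output flag is provided by add_option_output().
#
#   python lpod-highlight.py report.odt "TODO" --color red --background yellow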
| apache-2.0 | -8,872,036,109,267,874,000 | 38.758333 | 76 | 0.642423 | false | 3.724434 | false | false | false |
jetyang2005/elastalert | elastalert/alerts_extend_db.py | 1 | 2277 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#from __future__ import unicode_literals
import datetime
from alerts import Alerter, BasicMatchString
from util import elastalert_logger
from db_sqlconn import Mysql
'''
#################################################################
#      Push alert messages into the database for later querying          #
#################################################################
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class DBAlerter(Alerter):
    # Database host/IP, database name, username and password are required
def __init__(self, *args):
super(DBAlerter, self).__init__(*args)
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
return subject
def alert(self, matches):
body = self.create_alert_body(matches)
self.senddata(body)
elastalert_logger.info("send message to %s" % "admin")
def senddata(self, content):
mysql = Mysql(self.rule)
now = datetime.datetime.now()
now = now.strftime("%Y-%m-%d %H:%M:%S")
insert_sql = 'insert into link_alert(' \
'alert_ruleid, '\
'alert_rule, '\
'alert_userid,' \
'alert_username,' \
'alert_channel,' \
'alert_account,' \
'alert_message,' \
'alert_time,' \
'alert_status' \
') values ' \
'(%s,%s,%s,%s,%s,%s,%s,%s,"0")'
for alertperson in self.rule['alertpersons']:
insert_data = [self.rule['rule_id'],
self.rule['name'],
alertperson['user_id'],
alertperson['user_name'],
self.rule['alert_way'],
alertperson['user_email'],
content,
now]
mysql.insertOne(insert_sql, insert_data)
mysql.dispose()
elastalert_logger.info("send msg and response: %s" % content)
def get_info(self):
return {'type': 'DBAlerter'}
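    # Hedged configuration sketch (added, not part of the original module):
    # the field names below are the rule keys this alerter reads in senddata();
    # the values are illustrative only.
    #
    #   rule_id: 42
    #   name: error-spike
    #   alert_way: db
    #   alertpersons:
    #     - user_id: 1
    #       user_name: admin
    #       user_email: [email protected]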
| apache-2.0 | -2,756,643,408,934,341,000 | 25.566265 | 69 | 0.446712 | false | 3.958707 | false | false | false |
haphaeu/yoshimi | EulerProject/067_dag.py | 1 | 1839 | def dagTriang(m, size):
dist=[]
for i in range(size):
dist.append([])
for j in range(i+1):
dist[i].append(0)
#initial setup
dist[0][0]=m[0][0]
for icur in range(size):
for jcur in range(icur+1):
try:
#print icur, jcur
#check neighbors
#1: left
if icur<size-1:
alt= dist[icur][jcur] + m[icur+1][jcur]
if alt > dist[icur+1][jcur]:
dist[icur+1][jcur] = alt
#2: right
if jcur<=icur:
alt= dist[icur][jcur] + m[icur+1][jcur+1]
if alt > dist[icur+1][jcur+1]:
dist[icur+1][jcur+1] = alt
except: #for debugging reasons
print "error"
print dist
raise
return dist
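# Note (added): dagTriang is a top-down dynamic-programming pass over the
# triangle; dist[i][j] ends up holding the maximum path sum from the apex down
# to row i, column j, so the answer is max(dist[-1]). For the small test
# triangle [[3],[7,4],[2,4,6],[8,5,9,3]] kept in the docstring below, the best
# path is 3+7+4+9 = 23.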
# ### MAIN
url="http://projecteuler.net/project/triangle.txt"
import urllib
page=urllib.urlopen(url)
contents=page.read()
contents=contents.split('\r\n')
contents.pop()
m=[v.split(' ') for v in contents]
for i in range(len(m)):
for j in range(i+1):
m[i][j]=int(m[i][j])
"""
m=[[3],[7,4],[2,4,6],[8,5,9,3]]
m=[[75],
[95, 64],
[17, 47, 82],
[18, 35, 87, 10],
[20, 04, 82, 47, 65],
[19, 01, 23, 75, 03, 34],
[88, 02, 77, 73, 07, 63, 67],
[99, 65, 04, 28, 06, 16, 70, 92],
[41, 41, 26, 56, 83, 40, 80, 70, 33],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],
[63, 66, 04, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],
[04, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 04, 23]]
"""
size=len(m)
dists=dagTriang(m,size)
print max(dists[-1])
#output
#7273 | lgpl-3.0 | -3,430,795,768,939,448,000 | 26.462687 | 65 | 0.466014 | false | 2.665217 | false | false | false |
gklyne/annalist | src/annalist_root/annalist/models/collectiontypecache.py | 1 | 8754 | from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module is used to cache per-collection type information.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2017, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist import layout
from annalist.exceptions import Annalist_Error
from annalist.identifiers import ANNAL, RDFS
from annalist.models.collectionentitycache import (
Cache_Error, CollectionEntityCacheObject, CollectionEntityCache
)
from annalist.models.closurecache import ClosureCache
from annalist.models.recordtype import RecordType
# ---------------------------------------------------------------------------
#
# Type-cache object class
#
# ---------------------------------------------------------------------------
#@@@ supertype_closure_cache = {}
class CollectionTypeCacheObject(CollectionEntityCacheObject):
"""
This class is a type cache for a specified collection.
It extends class CollectionEntityCacheObject with type-specific logic; notably
overriding method _load_entity with additional logic to maintain a supertype
closure cache, and methods to access that cache.
"""
def __init__(self, coll_id, entity_cls=RecordType):
"""
Initialize a cache object for a specified collection.
coll_id Collection id with which the type cache is associated.
"""
super(CollectionTypeCacheObject, self).__init__(coll_id, entity_cls)
#@@@ supertype_closure_cache[coll_id]
self._supertype_closure_cache = ClosureCache(coll_id, ANNAL.CURIE.supertype_uri)
return
def _gsupertype_cache(self):
return self._supertype_closure_cache #@@@ supertype_closure_cache.get(self.get_coll_id(), None)
def _load_entity(self, coll, type_entity):
"""
Internal helper method loads type data to cache.
Also updates supertype closure cache.
Returns True if new type was added.
"""
type_id = type_entity.get_id()
type_uri = type_entity.get_uri()
type_parent = type_entity.get_parent().get_id()
type_data = type_entity.get_save_values()
add_type = super(CollectionTypeCacheObject, self)._load_entity(coll, type_entity)
if add_type:
# Add relations for supertype references from the new type URI
for supertype_obj in type_data.get(ANNAL.CURIE.supertype_uri, []):
supertype_uri = supertype_obj["@id"]
self._gsupertype_cache().add_rel(type_uri, supertype_uri)
# Also add relations for references *to* the new type URI
for try_subtype in self.get_all_entities(coll):
sub_st_objs = try_subtype.get(ANNAL.CURIE.supertype_uri, [])
sub_st_uris = [ sub_st_obj["@id"] for sub_st_obj in sub_st_objs ]
if type_uri in sub_st_uris:
subtype_uri = try_subtype.get(ANNAL.CURIE.uri, None)
if subtype_uri:
self._gsupertype_cache().add_rel(subtype_uri, type_uri)
return add_type
def _drop_entity(self, coll, type_id):
"""
Override method that drops entity from cache, to also remove references
from the supertype closure cache.
Returns the type entity removed, or None if not found.
"""
type_entity = super(CollectionTypeCacheObject, self)._drop_entity(coll, type_id)
if type_entity:
type_uri = type_entity.get_uri()
self._gsupertype_cache().remove_val(type_uri)
return type_entity
def get_type_uri_supertype_uris(self, type_uri):
"""
Returns all supertype URIs for a specified type URI.
Returns all supertype URIs, even those for which there
is no defined type entity.
"""
return self._gsupertype_cache().fwd_closure(type_uri)
def get_type_uri_subtype_uris(self, type_uri):
"""
Returns all subtype URIs for a specified type URI.
Returns all subtype URIs, even those for which there
is no defined type entity.
"""
return self._gsupertype_cache().rev_closure(type_uri)
def get_type_uri_supertypes(self, coll, type_uri):
"""
Returns all supertypes for a specified type URI.
This method returns only those supertypes that are defined as entities.
"""
self._load_entities(coll)
for st_uri in self.get_type_uri_supertype_uris(type_uri):
st = self.get_entity_from_uri(coll, st_uri)
if st:
yield st
return
def get_type_uri_subtypes(self, coll, type_uri):
"""
Returns all subtypes for a specified type URI.
This method returns only those subtypes that are defined as entities.
"""
self._load_entities(coll)
for st_uri in self.get_type_uri_subtype_uris(type_uri):
st = self.get_entity_from_uri(coll, st_uri)
if st:
yield st
return
def remove_cache(self):
"""
Close down and release all type cache data
"""
# log.debug("@@@@remove type cache %r"%(self.get_coll_id(),))
super(CollectionTypeCacheObject, self).remove_cache()
self._supertype_closure_cache.remove_cache()
self._supertype_closure_cache = None
return
# ---------------------------------------------------------------------------
#
# Collection type-cache class
#
# ---------------------------------------------------------------------------
class CollectionTypeCache(CollectionEntityCache):
"""
This class manages type cache objects over multiple collections
"""
def __init__(self):
"""
Initialize.
Initializes a value cache cache with no per-collection data.
"""
super(CollectionTypeCache, self).__init__(CollectionTypeCacheObject, RecordType)
return
# Collection type cache allocation and access methods
def set_type(self, coll, type_entity):
"""
Save a new or updated type definition
"""
return self.set_entity(coll, type_entity)
def remove_type(self, coll, type_id):
"""
Remove type from collection type cache.
Returns the type entity removed if found, or None if not defined.
"""
return self.remove_entity(coll, type_id)
def get_type(self, coll, type_id):
"""
Retrieve a type description for a given type Id.
Returns a type object for the specified collection and type Id.
"""
return self.get_entity(coll, type_id)
def get_type_from_uri(self, coll, type_uri):
"""
Retrieve a type description for a given type URI.
Returns a type object for the specified collection and type URI.
"""
return self.get_entity_from_uri(coll, type_uri)
def get_all_type_ids(self, coll, altscope=None):
"""
Returns all types currently available for a collection in the indicated scope.
Default scope is types defined directly in the indicated collection.
"""
return self.get_all_entity_ids(coll, altscope=altscope)
def get_all_types(self, coll, altscope=None):
"""
Returns all types currently available for a collection in the indicated scope.
Default scope is types defined directly in the indicated collection.
"""
return self.get_all_entities(coll, altscope=altscope)
def get_type_uri_supertypes(self, coll, type_uri):
"""
        Returns all supertypes for a specified type URI.
"""
type_cache = self._get_cache(coll)
return type_cache.get_type_uri_supertypes(coll, type_uri)
def get_type_uri_subtypes(self, coll, type_uri):
"""
        Returns all subtypes for a specified type URI.
"""
type_cache = self._get_cache(coll)
return type_cache.get_type_uri_subtypes(coll, type_uri)
def get_type_uri_supertype_uris(self, coll, type_uri):
"""
        Returns all supertype URIs for a specified type URI.
"""
type_cache = self._get_cache(coll)
return type_cache.get_type_uri_supertype_uris(type_uri)
def get_type_uri_subtype_uris(self, coll, type_uri):
"""
        Returns all subtype URIs for a specified type URI.
"""
type_cache = self._get_cache(coll)
return type_cache.get_type_uri_subtype_uris(type_uri)
# End.
| mit | 4,731,550,999,440,760,000 | 35.173554 | 103 | 0.600069 | false | 4.106004 | false | false | false |
buffer/peepdf | peepdf/PDFCrypto.py | 1 | 13561 | #
# peepdf is a tool to analyse and modify PDF files
# http://peepdf.eternal-todo.com
# By Jose Miguel Esparza <jesparza AT eternal-todo.com>
#
# Copyright (C) 2011-2017 Jose Miguel Esparza
#
# This file is part of peepdf.
#
# peepdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peepdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peepdf. If not, see <http://www.gnu.org/licenses/>.
#
'''
Module to manage cryptographic operations with PDF files
'''
import hashlib
import struct
import random
import warnings
import sys
import peepdf.aes
from itertools import cycle, izip
warnings.filterwarnings("ignore")
paddingString = '\x28\xBF\x4E\x5E\x4E\x75\x8A\x41\x64\x00\x4E\x56\xFF\xFA\x01\x08\x2E\x2E\x00\xB6\xD0\x68\x3E\x80\x2F\x0C\xA9\xFE\x64\x53\x69\x7A'
def computeEncryptionKey(password, dictOwnerPass, dictUserPass, dictOE, dictUE, fileID, pElement, dictKeyLength=128, revision=3, encryptMetadata=False, passwordType=None):
'''
Compute an encryption key to encrypt/decrypt the PDF file
@param password: The password entered by the user
@param dictOwnerPass: The owner password from the standard security handler dictionary
@param dictUserPass: The user password from the standard security handler dictionary
@param dictOE: The owner encrypted string from the standard security handler dictionary
@param dictUE:The user encrypted string from the standard security handler dictionary
@param fileID: The /ID element in the trailer dictionary of the PDF file
@param pElement: The /P element of the Encryption dictionary
@param dictKeyLength: The length of the key
@param revision: The algorithm revision
@param encryptMetadata: A boolean extracted from the standard security handler dictionary to specify if it's necessary to encrypt the document metadata or not
@param passwordType: It specifies the given password type. It can be 'USER', 'OWNER' or None.
@return: A tuple (status,statusContent), where statusContent is the encryption key in case status = 0 or an error message in case status = -1
'''
try:
if revision != 5:
keyLength = dictKeyLength/8
lenPass = len(password)
if lenPass > 32:
password = password[:32]
elif lenPass < 32:
password += paddingString[:32-lenPass]
md5input = password + dictOwnerPass + struct.pack('<i', int(pElement)) + fileID
if revision > 3 and not encryptMetadata:
md5input += '\xFF'*4
key = hashlib.md5(md5input).digest()
if revision > 2:
counter = 0
while counter < 50:
key = hashlib.md5(key[:keyLength]).digest()
counter += 1
key = key[:keyLength]
elif revision == 2:
key = key[:5]
return (0, key)
else:
if passwordType == 'USER':
password = password.encode('utf-8')[:127]
kSalt = dictUserPass[40:48]
intermediateKey = hashlib.sha256(password + kSalt).digest()
ret = peepdf.aes.decryptData('\0'*16+dictUE, intermediateKey)
elif passwordType == 'OWNER':
password = password.encode('utf-8')[:127]
kSalt = dictOwnerPass[40:48]
intermediateKey = hashlib.sha256(password + kSalt + dictUserPass).digest()
ret = peepdf.aes.decryptData('\0'*16+dictOE, intermediateKey)
return ret
except:
return (-1, 'ComputeEncryptionKey error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
def computeObjectKey(id, generationNum, encryptionKey, keyLengthBytes, algorithm='RC4'):
'''
Compute the key necessary to encrypt each object, depending on the id and generation number. Only necessary with /V < 5.
@param id: The object id
@param generationNum: The generation number of the object
@param encryptionKey: The encryption key
@param keyLengthBytes: The length of the encryption key in bytes
@param algorithm: The algorithm used in the encryption/decryption process
@return A tuple (status,statusContent), where statusContent is the computed key in case status = 0 or an error message in case status = -1
'''
try:
key = encryptionKey + struct.pack('<i', id)[:3] + struct.pack('<i', generationNum)[:2]
if algorithm == 'AES':
key += '\x73\x41\x6C\x54' # sAlT
key = hashlib.md5(key).digest()
if keyLengthBytes+5 < 16:
key = key[:keyLengthBytes+5]
else:
key = key[:16]
# AES: block size = 16 bytes, initialization vector (16 bytes), random, first bytes encrypted string
return (0, key)
except:
return (-1, 'ComputeObjectKey error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
def computeOwnerPass(ownerPassString, userPassString, keyLength=128, revision=3):
'''
Compute the owner password necessary to compute the encryption key of the PDF file
@param ownerPassString: The owner password entered by the user
@param userPassString: The user password entered by the user
@param keyLength: The length of the key
@param revision: The algorithm revision
@return A tuple (status,statusContent), where statusContent is the computed password in case status = 0 or an error message in case status = -1
'''
try:
# TODO: revision 5
keyLength = keyLength/8
lenPass = len(ownerPassString)
if lenPass > 32:
ownerPassString = ownerPassString[:32]
elif lenPass < 32:
ownerPassString += paddingString[:32-lenPass]
rc4Key = hashlib.md5(ownerPassString).digest()
if revision > 2:
counter = 0
while counter < 50:
rc4Key = hashlib.md5(rc4Key).digest()
counter += 1
rc4Key = rc4Key[:keyLength]
lenPass = len(userPassString)
if lenPass > 32:
userPassString = userPassString[:32]
elif lenPass < 32:
userPassString += paddingString[:32-lenPass]
ownerPass = RC4(userPassString, rc4Key)
if revision > 2:
counter = 1
while counter <= 19:
newKey = ''
for i in range(len(rc4Key)):
newKey += chr(ord(rc4Key[i]) ^ counter)
ownerPass = RC4(ownerPass, newKey)
counter += 1
return (0, ownerPass)
except:
return (-1, 'ComputeOwnerPass error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
def computeUserPass(userPassString, dictO, fileID, pElement, keyLength=128, revision=3, encryptMetadata=False):
'''
Compute the user password of the PDF file
@param userPassString: The user password entered by the user
    @param dictO: The /O element of the /Encrypt dictionary (the computed owner password)
@param fileID: The /ID element in the trailer dictionary of the PDF file
@param pElement: The /P element of the /Encryption dictionary
@param keyLength: The length of the key
@param revision: The algorithm revision
@param encryptMetadata: A boolean extracted from the standard security handler dictionary to specify if it's necessary to encrypt the document metadata or not
@return: A tuple (status,statusContent), where statusContent is the computed password in case status = 0 or an error message in case status = -1
'''
# TODO: revision 5
userPass = ''
dictU = ''
dictOE = ''
dictUE = ''
ret = computeEncryptionKey(userPassString, dictO, dictU, dictOE, dictUE, fileID, pElement, keyLength, revision, encryptMetadata)
if ret[0] != -1:
rc4Key = ret[1]
else:
return ret
try:
if revision == 2:
userPass = RC4(paddingString, rc4Key)
elif revision > 2:
counter = 1
md5Input = paddingString + fileID
hashResult = hashlib.md5(md5Input).digest()
userPass = RC4(hashResult, rc4Key)
while counter <= 19:
newKey = ''
for i in range(len(rc4Key)):
newKey += chr(ord(rc4Key[i]) ^ counter)
userPass = RC4(userPass, newKey)
counter += 1
counter = 0
while counter < 16:
userPass += chr(random.randint(32, 255))
counter += 1
else:
# This should not be possible or the PDF specification does not say anything about it
return (-1, 'ComputeUserPass error: revision number is < 2 (%d)' % revision)
return (0, userPass)
except:
return (-1, 'ComputeUserPass error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))
def isUserPass(password, computedUserPass, dictU, revision):
'''
Checks if the given password is the User password of the file
@param password: The given password or the empty password
@param computedUserPass: The computed user password of the file
@param dictU: The /U element of the /Encrypt dictionary
@param revision: The number of revision of the standard security handler
@return The boolean telling if the given password is the user password or not
'''
if revision == 5:
vSalt = dictU[32:40]
inputHash = hashlib.sha256(password + vSalt).digest()
if inputHash == dictU[:32]:
return True
else:
return False
elif revision == 3 or revision == 4:
if computedUserPass[:16] == dictU[:16]:
return True
else:
return False
elif revision < 3:
if computedUserPass == dictU:
return True
else:
return False
def isOwnerPass(password, dictO, dictU, computedUserPass, keyLength, revision):
'''
Checks if the given password is the owner password of the file
@param password: The given password or the empty password
@param dictO: The /O element of the /Encrypt dictionary
@param dictU: The /U element of the /Encrypt dictionary
@param computedUserPass: The computed user password of the file
@param keyLength: The length of the key
@param revision: The algorithm revision
@return The boolean telling if the given password is the owner password or not
'''
if revision == 5:
vSalt = dictO[32:40]
inputHash = hashlib.sha256(password + vSalt + dictU).digest()
if inputHash == dictO[:32]:
return True
else:
return False
else:
keyLength = keyLength/8
lenPass = len(password)
if lenPass > 32:
password = password[:32]
elif lenPass < 32:
password += paddingString[:32-lenPass]
rc4Key = hashlib.md5(password).digest()
if revision > 2:
counter = 0
while counter < 50:
rc4Key = hashlib.md5(rc4Key).digest()
counter += 1
rc4Key = rc4Key[:keyLength]
if revision == 2:
userPass = RC4(dictO, rc4Key)
elif revision > 2:
counter = 19
while counter >= 0:
newKey = ''
for i in range(len(rc4Key)):
newKey += chr(ord(rc4Key[i]) ^ counter)
dictO = RC4(dictO, newKey)
counter -= 1
userPass = dictO
else:
# Is it possible??
userPass = ''
return isUserPass(userPass, computedUserPass, dictU, revision)
def RC4(data, key):
'''
RC4 implementation
    @param data: Bytes to be encrypted/decrypted
@param key: Key used for the algorithm
@return: The encrypted/decrypted bytes
'''
y = 0
hash = {}
box = {}
ret = ''
keyLength = len(key)
dataLength = len(data)
# Initialization
for x in range(256):
hash[x] = ord(key[x % keyLength])
box[x] = x
for x in range(256):
y = (y + int(box[x]) + int(hash[x])) % 256
tmp = box[x]
box[x] = box[y]
box[y] = tmp
z = y = 0
for x in range(0, dataLength):
z = (z + 1) % 256
y = (y + box[z]) % 256
tmp = box[z]
box[z] = box[y]
box[y] = tmp
k = box[((box[z] + box[y]) % 256)]
ret += chr(ord(data[x]) ^ k)
return ret
'''
Author: Evan Fosmark (http://www.evanfosmark.com/2008/06/xor-encryption-with-python/)
'''
def xor(bytes, key):
'''
Simple XOR implementation
@param bytes: Bytes to be xored
@param key: Key used for the operation, it's cycled.
@return: The xored bytes
'''
key = cycle(key)
return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(bytes, key))
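if __name__ == '__main__':
    # Hedged self-test sketch (added, not part of the original module): RC4 and
    # xor are their own inverses, so applying them twice with the same key must
    # restore the input. The sample data and key are illustrative.
    sample = 'attack at dawn'
    key = 'not so secret'
    assert RC4(RC4(sample, key), key) == sample
    assert xor(xor(sample, key), key) == sample
    print 'RC4/xor round-trip OK'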
| gpl-3.0 | -155,356,212,868,732,830 | 38.885294 | 171 | 0.600029 | false | 3.999115 | false | false | false |
vitkarpenko/rague | rague/entities/entity.py | 1 | 1423 | """This module provides main Entity class
used as a base for all other entities (through
subclassing or instantiation).
"""
from copy import deepcopy
from rague.components import Component
from rague.utils import to_snake_case
class EntityMeta(type):
"""Every entity should declare a set of "components" instances
which are used as a default values for instantiation.
This metaclass turns this set into a dictionary
for convenience.
"""
def __new__(mcs, name, bases, attrs):
if 'components' not in attrs:
raise AttributeError(
'Entity subclasses should declare a set of '
'Components called "components".'
)
components_dict = {
component.__class__.__name__: component for component in attrs['components']
}
attrs['components'] = components_dict
return super().__new__(mcs, name, bases, attrs)
class Entity(metaclass=EntityMeta):
components = {}
def __init__(self, *args):
init_components = {
component.__class__.__name__: component
for component in args
if isinstance(component, Component)
}
merged_components = {**deepcopy(self.components), **init_components}
if not merged_components:
return
for key, val in merged_components.items():
setattr(self, to_snake_case(key), val)
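# Hedged usage sketch (added, not part of the original module): a subclass
# declares its default components as a set; ``Position`` is a hypothetical
# Component subclass used purely for illustration, so its constructor
# signature is an assumption.
#
#   class Player(Entity):
#       components = {Position(x=0, y=0)}
#
#   hero = Player(Position(x=3, y=7))   # overrides the default Position
#   hero.position                       # attribute name via to_snake_case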
| mit | 1,002,777,172,380,279,800 | 31.340909 | 88 | 0.623331 | false | 4.759197 | false | false | false |
cosmos/cosmos | atom_sim.py | 1 | 1306 | print "Compute atoms for validators and delegators over time"
import math
atomsVal = 0.000 # starting atoms for validator
atomsDel = 0.010 # starting atoms delegated to validator
atomsAll = 1.0 #
inflation = 0.3 # 30% inflation
inflationLg = math.log(1.0 + inflation) # for exponential
exponential = True # exponential
commission = 0.10 # 10% commission
numBlocksPerYear = 1000
for year in range(0,50):
rewardsYear = 0.0
for i in range(0,numBlocksPerYear):
if exponential:
blockReward = (atomsAll * inflationLg) / float(numBlocksPerYear)
else:
blockReward = inflation / float(numBlocksPerYear)
atomsAll += blockReward
rewardsYear += blockReward
rewardVal = blockReward * (atomsVal / atomsAll)
rewardDel = blockReward * (atomsDel / atomsAll)
rewardVal += rewardDel * commission
rewardDel *= (1.0 - commission)
atomsVal += rewardVal
atomsDel += rewardDel
#print atomsVal, atomsDel, (atomsVal / atomsAll)
print year, "atomsVal: %0.3f" % (atomsVal,), "atomsDel: %0.3f" % (atomsDel,), \
"atomsAll: %0.3f" % (atomsAll,), "atomsVal%%: %0.2f" % ((100 * atomsVal / atomsAll),), \
"atomsDel%%: %0.2f" % ((100 * atomsDel / atomsAll),), "rewards: %0.2f"%(rewardsYear,), \
"valDelRatio: %0.3f" % (atomsVal / (atomsDel + atomsVal))
| mit | 5,159,716,723,795,485,000 | 38.575758 | 90 | 0.668453 | false | 3.131894 | false | false | false |
nayyarv/CorpusCleaners | IEMOCAP Sorter Script/Sorter.py | 1 | 1812 | #Sorter.py
import os
import shutil
from helper import mkdir, listdir
os.chdir("..")
currdir = os.getcwd()
print currdir
def fileParser(filePath):
emotionDict = {}
with open(filePath, 'r') as f:
for line in f:
tokens = line.split("\t")
if len(tokens) == 4:
utteranceID, emotion = line.split("\t")[1:3]
emotionDict[utteranceID] = emotion
return emotionDict
def mover(filePath, emotion):
filename = filePath.rsplit("/",1)[1]
speakerID = filename.split("_",1)[0]
print filename, speakerID, emotion
if not emotion == "xxx":
mkdir(os.path.join("CleanedIEMOCAP", emotion))
mkdir(os.path.join("CleanedIEMOCAP", emotion, speakerID))
shutil.move(filePath, os.path.join("CleanedIEMOCAP", emotion, speakerID, filename))
def refactor():
mkdir("CleanedIEMOCAP")
for i in range(1,6):
sesh = "Session{}".format(i)
for convos in listdir(os.path.join(currdir, sesh, "wav")):
speakerID = convos.split("_")[0]
transcriptionLoc = os.path.join(currdir, sesh, "Eval", convos+".txt")
emotionDict = fileParser(transcriptionLoc)
currLoc = os.path.join(currdir, sesh, "wav", convos)
for utteranceWav in listdir(currLoc):
utteranceID = utteranceWav.rstrip(".wav")
mover(os.path.join(currLoc, utteranceWav), emotionDict[utteranceID])
def stats():
statEmotions = {}
for i in range(1,6):
sesh = "Session{}".format(i)
for convos in listdir(os.path.join(currdir, sesh, "Eval")):
if not convos.endswith(".txt"): continue
emotionDict = fileParser(os.path.join(currdir, sesh, "Eval", convos))
for emotions in emotionDict.values():
try:
statEmotions[emotions]+=1
except KeyError:
statEmotions[emotions] = 1
for k,v in statEmotions.iteritems():
print "{}: {}".format(k,v)
if __name__ == '__main__':
refactor()
stats() | mit | -3,879,577,881,628,224,000 | 22.855263 | 85 | 0.669978 | false | 2.796296 | false | false | false |
sticilface/ESPmanager | buildmanifest.py | 1 | 2388 | #! /usr/bin/env python
import hashlib
import os
import json
import sys
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
# run: python buildmanifest.py <pathtobin> <output manifest.json>
# place SPIFFS files in pathtobin/data/
# you can copy the bin to the working sketch directory, and leave data as is.
# available commands
# formatSPIFFS
# clearWiFi
# rooturi
# repo = Repo(os.getcwd())
# branch = repo.active_branch
# branch = branch.name
data = {}
data["files"] = {}
List = []
index = 0
a = 0
# Set the directory you want to start from. All SPIFFS content lives in the ./data directory.
print("Python Start")
rootDir = sys.argv[1] + "/data"
for dirName, subdirList, fileList in os.walk(rootDir):
if not dirName.startswith('.'):
for fname in fileList:
if not fname.startswith('.'):
if not fname.endswith(".bin"):
relPath = os.path.relpath( dirName + "/" + fname, rootDir)
locPath = os.path.relpath( dirName + "/" + fname, sys.argv[1])
print("RelPath = " + relPath)
item = {}
#item["index"] = index
index = index + 1
item["location"] = "/" + locPath
# item["isurl"] = False
item["md5"] = md5(dirName + "/" + fname)
item["saveto"] = "/" + relPath
List.append(item)
else:
print(".bin = " + fname)
if fname == "firmware.bin":
index = index + 1
print("binary hit:" + dirName + "/" + fname + "(" + md5(dirName + "/" + fname) + ")")
binary = {}
# binary["index"] = index
binary["location"] = "/data/firmware.bin"
binary["saveto"] = "sketch"
binary["md5"] = md5(dirName + "/" + fname)
List.append(binary)
data["files"] = List
#data["filecount"] = index
# print(json.dumps(data, sort_keys=False, indent=4))
with open(sys.argv[2], 'w') as outfile:
json.dump(data, outfile)
exit(0)
# json_data = json.dumps(data)
# print(List)
#print '[%s]' % ', '.join(map(str, List))
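# Hedged output sketch (added, not part of the original script): the manifest
# written by json.dump above has this shape; the paths and hashes below are
# illustrative.
#
#   {"files": [
#       {"location": "/data/index.htm", "md5": "<md5>", "saveto": "/index.htm"},
#       {"location": "/data/firmware.bin", "md5": "<md5>", "saveto": "sketch"}
#   ]}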
| lgpl-3.0 | 3,367,725,312,397,683,700 | 28.481481 | 109 | 0.511307 | false | 3.612708 | false | false | false |
onzag/vkcrypto | lib/python/pyvkcrypto.py | 2 | 2525 | import ctypes
import sys
DLL = ctypes.CDLL('/usr/local/lib/libvkcrypto.so')
VK_SCRYPT_HASH_LEN = 32
VK_SCRYPT_SALT_LEN = 16
VK_SCRYPT_LEN = 48
AES_BLOCK_SIZE = 16
def scrypt(plaintext,hard=False):
buffer = ctypes.create_string_buffer(DLL.getScryptSize())
status = DLL.scrypt(plaintext,len(plaintext),buffer,hard)
if (status == 0):
return buffer.raw
else:
return None;
def scryptcheck(scryptdata,plaintext,hard=False):
return (DLL.scryptcheck(scryptdata,plaintext,len(plaintext),hard) == 0)
def scryptencrypt(plaintext,password,hard=False):
buffer = ctypes.create_string_buffer(DLL.getScryptEncryptedSize(len(plaintext)))
status = DLL.scryptencrypt(plaintext,len(plaintext),password,len(password),buffer,hard)
if (status == 0):
return buffer.raw
else:
return None
def scryptdecrypt(cipher,password,hard=False):
reslen = DLL.getScryptDecryptedSize(len(cipher));
buffer = ctypes.create_string_buffer(reslen);
real_size = ctypes.c_uint()
status = DLL.scryptdecrypt(cipher,len(cipher),password,len(password),buffer,ctypes.byref(real_size),hard);
if (status == 0):
return buffer.raw[:real_size.value]
else:
return None
def genRSA2048():
genrsa = DLL.genRSA2048
genrsa.argtypes = [ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),ctypes.POINTER(ctypes.c_uint),
ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),ctypes.POINTER(ctypes.c_uint)]
pub = ctypes.POINTER(ctypes.c_ubyte)();
pub_l = ctypes.c_uint(0)
priv = ctypes.POINTER(ctypes.c_ubyte)();
priv_l = ctypes.c_uint(0)
status = genrsa(ctypes.byref(pub),ctypes.byref(pub_l),ctypes.byref(priv),ctypes.byref(priv_l));
if (status == 0):
if (sys.version_info.major >= 3):
return ((bytes(pub[:pub_l.value]), bytes(priv[:priv_l.value])))
pub_h = b''
for i in range(0,pub_l.value):
pub_h += chr(pub[i])
priv_h = b''
for i in range(0,priv_l.value):
priv_h += chr(priv[i])
return (pub_h,priv_h)
else:
return None
def RSAencrypt(key,public,plaintext):
reslen = DLL.getRSAEncryptedSize(len(plaintext))
buffer = ctypes.create_string_buffer(reslen)
status = DLL.RSAencrypt(key,len(key),plaintext,len(plaintext),public,buffer)
if (status == 0):
return buffer.raw
else:
return None
def RSAdecrypt(key,public,cipher):
reslen = DLL.getRSADecryptedSize(len(cipher))
buffer = ctypes.create_string_buffer(reslen)
real_size = ctypes.c_uint()
status = DLL.RSAdecrypt(key,len(key),cipher,len(cipher),public,buffer,ctypes.byref(real_size))
if (status == 0):
return buffer.raw[:real_size.value]
else:
return None
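if __name__ == '__main__':
    # Hedged usage sketch (added, not part of the original module): exercises
    # the scrypt helpers defined above. Requires libvkcrypto.so to be installed
    # at /usr/local/lib; the sample strings are illustrative.
    hashed = scrypt(b'correct horse battery staple')
    print(scryptcheck(hashed, b'correct horse battery staple'))  # -> True
    cipher = scryptencrypt(b'hello world', b'password')
    print(scryptdecrypt(cipher, b'password'))                    # -> original plaintext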
| mit | -5,353,230,014,360,499,000 | 29.421687 | 107 | 0.723168 | false | 2.908986 | false | false | false |
canvasnetworks/canvas | website/drawquest/apps/quests/migrations/0003_auto__add_quest__del_field_scheduledquest_comment__del_field_scheduled.py | 1 | 11270 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
('canvas', '0013_add_comment_table'),
('canvas', '0074_create_category'),
('canvas', '0144_auto__add_field_comment_title'),
('canvas', '0147_auto__add_field_commentsticker_epic_message'),
)
def forwards(self, orm):
# Deleting field 'ScheduledQuest.comment'
db.delete_column('quests_scheduledquest', 'comment_id')
# Deleting field 'ScheduledQuest.archived'
db.delete_column('quests_scheduledquest', 'archived')
# Adding field 'ScheduledQuest.quest'
delete_quest = False
if db.dry_run:
quest_id = None
else:
if not orm.Quest.objects.all():
quest = orm.Quest.objects.create()
delete_quest = True
else:
quest = orm.Quest.objects.all()[0]
quest_id = quest.id
db.add_column('quests_scheduledquest', 'quest', self.gf('django.db.models.fields.related.OneToOneField')(default=quest_id, to=orm['canvas.Comment'], unique=True), keep_default=False)
# Adding field 'ScheduledQuest.appeared_on'
db.add_column('quests_scheduledquest', 'appeared_on', self.gf('canvas.util.UnixTimestampField')(null=True, db_index=True), keep_default=False)
if delete_quest:
quest.delete()
def backwards(self, orm):
return
# User chose to not deal with backwards NULL issues for 'ScheduledQuest.comment'
raise RuntimeError("Cannot reverse this migration. 'ScheduledQuest.comment' and its values cannot be restored.")
# Adding field 'ScheduledQuest.archived'
db.add_column('quests_scheduledquest', 'archived', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Deleting field 'ScheduledQuest.quest'
db.delete_column('quests_scheduledquest', 'quest_id')
# Deleting field 'ScheduledQuest.appeared_on'
db.delete_column('quests_scheduledquest', 'appeared_on')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'drawquest_auth.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['canvas_auth.User'], 'proxy': 'True'}
},
'quests.quest': {
'Meta': {'object_name': 'Quest', 'db_table': "'canvas_comment'", '_ormbases': ['canvas.Comment'], 'proxy': 'True'}
},
'quests.scheduledquest': {
'Meta': {'ordering': "['sort']", 'object_name': 'ScheduledQuest'},
'appeared_on': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'db_index': 'True'}),
'curator': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'scheduled_quests'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quest': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['canvas.Comment']", 'unique': 'True'}),
'sort': ('django.db.models.fields.IntegerField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
}
}
complete_apps = ['quests']
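    # The 'models' dict above is South's frozen ORM snapshot: a declarative description of
    # the schema as it existed when this migration was written. South only reads it to
    # rebuild model state for this step; it is never executed as live model code.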
| bsd-3-clause | -8,638,866,046,194,777,000 | 68.567901 | 195 | 0.56362 | false | 3.661468 | false | false | false |
jeoliva/hls-analyzer | m3u8/__init__.py | 2 | 2424 | # coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
import sys
PYTHON_MAJOR_VERSION = sys.version_info
import os
import posixpath
try:
from cookielib import CookieJar
except ImportError:
from http.cookiejar import CookieJar
try:
import urlparse as url_parser
import urllib2
cj = CookieJar()
cookieProcessor = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cookieProcessor)
urlopen = opener.open
except ImportError:
    import urllib.parse as url_parser
    # Build a cookie-aware opener here as well, so getCookieProcessor() also works on Python 3
    from urllib.request import build_opener, HTTPCookieProcessor
    cj = CookieJar()
    cookieProcessor = HTTPCookieProcessor(cj)
    opener = build_opener(cookieProcessor)
    urlopen = opener.open
from m3u8.model import M3U8, Playlist, IFramePlaylist, Media, Segment
from m3u8.parser import parse, is_url
__all__ = ('M3U8', 'Playlist', 'IFramePlaylist', 'Media',
'Segment', 'loads', 'load', 'parse')
def loads(content):
'''
Given a string with a m3u8 content, returns a M3U8 object.
Raises ValueError if invalid content
'''
return M3U8(content)
def load(uri):
'''
Retrieves the content from a given URI and returns a M3U8 object.
Raises ValueError if invalid content or IOError if request fails.
'''
if is_url(uri):
return _load_from_uri(uri)
else:
return _load_from_file(uri)
def getCookieProcessor():
return cookieProcessor
# Support for python3 inspired by https://github.com/szemtiv/m3u8/
def _load_from_uri(uri):
resource = urlopen(uri)
base_uri = _parsed_url(_url_for(uri))
if PYTHON_MAJOR_VERSION < (3,):
content = _read_python2x(resource)
else:
content = _read_python3x(resource)
return M3U8(content, base_uri=base_uri)
def _url_for(uri):
return urlopen(uri).geturl()
def _parsed_url(url):
parsed_url = url_parser.urlparse(url)
prefix = parsed_url.scheme + '://' + parsed_url.netloc
base_path = posixpath.normpath(parsed_url.path + '/..')
return url_parser.urljoin(prefix, base_path)
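# Worked example for _parsed_url above (the URL is hypothetical): it strips the manifest
# file name to yield the base URI later used to resolve relative segment URIs, e.g.
# 'http://example.com/streams/master.m3u8' -> 'http://example.com/streams'.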
def _read_python2x(resource):
return resource.read().strip()
def _read_python3x(resource):
return resource.read().decode(resource.headers.get_content_charset(failobj="utf-8"))
def _load_from_file(uri):
with open(uri) as fileobj:
raw_content = fileobj.read().strip()
base_uri = os.path.dirname(uri)
return M3U8(raw_content, base_uri=base_uri)
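# Illustrative usage sketch (not part of the upstream module; assumes the m3u8 package is
# importable and that a manifest URI or local path is supplied on the command line):
if __name__ == '__main__':
    if len(sys.argv) > 1:
        playlist = load(sys.argv[1])  # accepts an http(s) URI or a local file path
        for segment in playlist.segments:
            print(segment.uri)
    # loads(manifest_text) behaves the same way but takes the manifest contents as a string.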
| mit | 2,797,641,510,060,910,000 | 27.186047 | 89 | 0.689356 | false | 3.4189 | false | false | false |
mcxiaoke/python-labs | archives/tk/lib.py | 1 | 1580 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-10 21:25:48
from __future__ import print_function
import sys
import os
from Tkinter import *
class ScrolledText(Frame):
def __init__(self, parent=None, text='', file=None):
Frame.__init__(self, parent)
        # Automatically expand to fill the available space
self.pack(expand=YES, fill=BOTH)
self.makeWidgets()
self.settext(text, file)
def makeWidgets(self):
text = Text(self, relief=SUNKEN)
sbar = Scrollbar(self)
        # Connect the scrollbar to the text widget
sbar.config(command=text.yview)
text.config(yscrollcommand=sbar.set)
        # Pack the scrollbar first
sbar.pack(side=RIGHT, fill=Y)
text.pack(side=LEFT, expand=YES, fill=BOTH)
self.text = text
def settext(self, text='', file=None):
if file:
text = open(file, 'r').read()
        # Delete the current text
self.text.delete('1.0', END)
        # Insert the new text starting at the beginning
self.text.insert('1.0', text)
        # Move the insertion cursor to the start
self.text.mark_set(INSERT, '1.0')
        # Grab keyboard focus
self.text.focus()
def insert(self, index, text=''):
self.text.insert(index, text)
def see(self, index):
self.text.see(index)
def bind(self, sequence, func):
self.text.bind(sequence, func)
def update(self):
self.text.update()
def gettext(self):
        # Return the full text
        # '1.0' means line 1, column 0; '-1c' means one character before the end (drops the trailing newline)
return self.text.get('1.0', END+'-1c')
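# Illustrative demo (added as an example, not in the original script); it simply shows the
# widget by displaying this module's own source.
if __name__ == '__main__':
    root = Tk()
    root.title('ScrolledText demo')
    ScrolledText(root, file=__file__)
    root.mainloop()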
| apache-2.0 | 1,650,884,617,046,082,000 | 24.034483 | 56 | 0.563361 | false | 2.75 | false | false | false |
abadger/stellarmagnate | magnate/savegame/base_types.py | 1 | 5479 | # Stellar Magnate - A space-themed commodity trading game
# Copyright (C) 2018 Toshio Kuratomi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Routines to load base game types from data files
"""
import enum
import os
from functools import partial
import voluptuous as v
from voluptuous.humanize import validate_with_humanized_errors as v_validate
try: # pragma: no cover
from yaml import CSafeLoader as Loader
except ImportError: # pragma: no cover
from yaml import SafeLoader as Loader
from ..logging import log
mlog = log.fields(mod=__name__)
# Enums that are created at runtime and then used with the database. See the
# data/base/stellar-types.yml file if you suspect this list is out of date
# pylint: disable=invalid-name
CommodityType = None
CelestialType = None
LocationType = None
FinancialType = None
OrderStatusType = None
# pylint: enable=invalid-name
def type_name(value):
"""Validate that the names of types follow our conventions"""
flog = mlog.fields(func='type_name')
flog.fields(value=value).debug('validate that type_name follows convention')
if not isinstance(value, str):
raise ValueError('Type names must be strings')
if not value.endswith('Type'):
raise ValueError('Type names must end with "Type"')
if not value[0] == value[0].upper():
raise ValueError('Type names must begin with an uppercase character (following class'
' naming conventions)')
flog.debug('type_name {0} follows the proper conventions', value)
return value
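# Example behaviour (illustrative): type_name('CommodityType') returns the value unchanged,
# while type_name('commodity') raises ValueError because it does not end in 'Type'.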
def _generic_types_validator(type_enum, value):
"""Validate that a string is valid in a :class:`enum.Enum` and transform it into the enum"""
    flog = mlog.fields(func='_generic_types_validator', type_enum=type_enum)
flog.fields(type_enum=type_enum, value=value).debug('validate and transform into an enum value')
try:
enum_value = type_enum[value]
except KeyError:
raise ValueError(f'{value} is not a valid member of {type_enum.__name__}')
except Exception:
if not isinstance(value, type_enum):
raise ValueError(f'{value} is not a {type_enum.__name__}')
raise
flog.fields(enum_value=enum_value).debug('transformed into enum_value to return')
return enum_value
DATA_TYPES_SCHEMA = v.Schema({'version': '0.1',
'types': {type_name: [str]}, },
required=True)
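# Illustrative example of a document that satisfies DATA_TYPES_SCHEMA (the member names are
# assumptions; the real data/base/stellar-types.yml defines its own entries):
#
#   version: '0.1'
#   types:
#     CommodityType:
#       - food
#       - metal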
def load_base_types(datadir):
"""
Parse the yaml file of base enum types and return the information
:arg datadir: The data directory to find the types file
:returns: A list of types
"""
flog = mlog.fields(func='load_base_types')
flog.fields(datadir=datadir).debug('Entered load_base_types')
data_file = os.path.join(datadir, 'base', 'stellar-types.yml')
with_file_log = flog.fields(filename=data_file)
with_file_log.debug('constructed data_file path {data_file}', data_file=data_file)
with_file_log.debug('Opening data_file')
with open(data_file, 'r') as data_fh:
with_file_log.debug('reading data_file')
yaml_data = data_fh.read()
with_file_log.fields(yaml=yaml_data).debug('parsing yaml string')
loader = Loader(yaml_data)
data = loader.get_single_data()
flog.fields(data=data).debug('Validating type data structure')
data = v_validate(data, DATA_TYPES_SCHEMA)
flog.debug('Returning type data')
return data
def init_base_types(datadir):
"""
Initialize the global base types from the types data file
:arg datadir: The data directory to find the types file
**Side effects**: This function initializes the global Type variables which are Python Enums for
various data types (Types of Commodities, Types of Locations, etc). Since it modifies module
globals it needs to be run early, before any threading. The Type variables are used by
everything else in savegames so it should be run as one of the first things upon accessing a
savegame.
"""
flog = mlog.fields(func='init_base_types')
flog.fields(datadir=datadir).debug('Entered init_base_types')
m_globals = globals()
for name in m_globals:
if name.endswith('Type'):
if m_globals[name] is None:
break
else:
flog.debug('base_types Enums already created. Exiting init_base_types early')
return
base_type_data = load_base_types(datadir)
for name, entries in base_type_data['types'].items():
flog.fields(enum=name).debug('Creating enum')
m_globals[name] = enum.Enum(name, entries, module=__name__)
# Create a voluptuous validator for this type as well
m_globals[name].validator = partial(_generic_types_validator, m_globals[name])
flog.debug('Leaving init_base_types')
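# Illustrative usage sketch (not part of the original module; the datadir value is an
# assumption). From elsewhere in the package one might write:
#
#     from magnate.savegame import base_types
#     base_types.init_base_types('/path/to/datadir')
#     # afterwards the module-level enums are populated, e.g.:
#     base_types.CommodityType['food']  # assumes 'food' is defined in stellar-types.yml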
| agpl-3.0 | 4,070,307,166,160,550,400 | 35.284768 | 100 | 0.688629 | false | 3.799584 | false | false | false |