repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 values) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 values) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
ostrokach/biskit | scripts/Mod/align.py | 1 | 4218 | #!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2012 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
##
## last $Author$
## $Date$
## $Revision$
import Biskit.Mod.modUtils as modUtils
from Biskit.Mod import *
import Biskit.tools as tools
from Biskit import EHandler
from Biskit import LogFile
import sys, os.path
def _use( o ):
print """
Syntax: align.py [ -o |outFolder| -log |logFile| -h |host_computer| -nosap ]
Options:
-o output folder for results (default: .)
-log log file (default: STDOUT)
-nosap skip structural alignment (default: don't skip)
-h host computer for calculation (default: local computer)
-> must be accessible w/o password via ssh, check!
-? or help .. this help screen
Default options:
"""
for key, value in o.items():
print "\t-",key, "\t",value
sys.exit(0)
def defaultOptions():
return {'o':'.',
'log': None,
'h':None
}
### MAIN ###
options = tools.cmdDict( defaultOptions() )
outFolder = tools.absfile( options['o'] )
host = options['h']
sap = not 'nosap' in options
log = None
if options['log']:
log = LogFile( outFolder + '/' + options['log'], 'a' )
if not (os.path.exists( outFolder +'/templates' ) ):
print 'Current directory is not a valid modeling folder (missing /templates).'
_use( defaultOptions() )
if '?' in options or 'help' in options:
_use( defaultOptions() )
###################
## Aligner
##
## Create a sequence-structure alignment using T-coffee.
## Convert the alignment into Modeller compatible format
## input: sequences/nr.fasta
## templates/templates.fasta
## templates/t_cofee/*.alpha
##
## output: t_coffee/fast_pair.lib
## /final.score_html
## /struct.aln
## /t_coffee.log_*
## /final.aln
## /lalign_id_pair.lib
## /struct.aln_original
## /final.phylip
## /sap_pair.lib
## /t_coffee.inp
## /final.pir_aln (input for Modeller)
## /sap_pair.lib_original
## note 1: If there are more than approximately 50 sequences overall
## t_coffee will eat all the memory and the job will not finish
## This should be fixed in more recent versions of T-Coffee
## (v > 3.2) where T-Coffee, according to the manual "switches
## to a heuristic mode, named DPA, where DPA stands for Double
## Progressive Alignment."
## note 2: If there is only one template structure step 2 of T-coffee
## will not work. Solution, skip the structural alignment if
## only one template structure is provided.
## note 3: In quite some cases the sequence retrieved from the nrpdb
## sequence database is different from the sequence extracted
## from the coordinates in the pdb-file. This will sometimes
## cause t-coffee to terminate with an error (two sequences
## with the same name but with different sequences). Temporary
## solution: Choose another structure from the same cluster
## as the troublemaker.
try:
a = Aligner( outFolder, log, verbose=1, sap=sap )
a.align_for_modeller_inp()
a.go(host)
except:
    EHandler.error( 'Error while building alignments.')
print "\nalign.py -? or align.py -help for help screen"
| gpl-3.0 | 5,412,043,434,244,115,000 | 31.446154 | 83 | 0.627786 | false |
sopython/kesh | kesh/_database/creation/create_post_history.py | 1 | 1880 | from pymongo import MongoClient
from lxml import etree
from dateutil.parser import parse
import pickle
from time import gmtime, strftime
import os
import re
data_dir = '../../../bin/so_data_/'
file_name = 'PostHistory.xml'
db_name = 'kesh'
coll_name = 'post_history'
client = MongoClient()
db = client[db_name]
coll = db[coll_name]
context = etree.iterparse(os.path.join(data_dir, file_name),
events=('start', 'end'))
str_to_int = {'Id', 'PostHistoryTypeId', 'PostId', 'UserID'}
str_to_date = {'CreationDate'}
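# Convert CamelCase XML attribute names to snake_case field names,
# e.g. 'PostHistoryTypeId' -> 'post_history_type_id'.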
def convert(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
# Load in a set of python ids.
with open('question_ids.pickle', 'rb') as q, \
open('answer_ids.pickle', 'rb') as a:
question_ids = pickle.load(q)
answer_ids = pickle.load(a)
ids = question_ids | answer_ids
f = open(os.path.join(data_dir, './logs/{:s}.log'.format(coll_name)), 'w')
s = 'Importing {:s} data.\n\n'.format(coll_name)
f.write(s)
print(s, end='')
i = 0
for event, elem in context:
if event == 'end' and elem.tag == 'row':
# Create a dictionary and convert any necessary fields.
d = dict(elem.items())
if int(d['PostId']) in ids:
d = {convert(k):int(v) if k in str_to_int else
parse(v) if k in str_to_date else
v for k, v in d.items()}
coll.insert(d)
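        # Clear each processed element and drop already-seen siblings so
        # iterparse does not keep the whole XML tree in memory.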
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
i += 1
if i % 10000 == 0:
s_option = (strftime('%H:%M:%S', gmtime()), d['id'], i)
s = '{:s} : Id - {:d} : # - {:d}\n'.format(*s_option)
print(s, end='')
f.write(s)
print('Creating indices.')
coll.ensure_index(convert('id'))
f.close()
| bsd-3-clause | -2,257,058,302,008,041,500 | 27.923077 | 74 | 0.551064 | false |
lsbardel/python-stdnet | tests/all/query/related.py | 1 | 6409 | import datetime
from random import randint, uniform
from stdnet.utils import test
from examples.models import Node, Role, Profile, Dictionary
from examples.data import FinanceTest, Position, Instrument, Fund
def create(cls, root=None, nesting=None):
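    # Build a random tree of Node objects asynchronously: create the root
    # first, then recurse to add `nesting` levels of children with random weights.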
models = cls.mapper
if root is None:
with models.session().begin() as t:
root = t.add(models.node(weight=1.0))
yield t.on_result
yield create(cls, root, nesting=nesting)
elif nesting:
N = randint(2,9)
with models.session().begin() as t:
for n in range(N):
node = t.add(models.node(parent=root, weight=uniform(0,1)))
yield t.on_result
yield cls.multi_async((create(cls, node, nesting-1) for node\
in t.saved[node._meta]))
class TestSelfForeignKey(test.TestCase):
    '''The Node model is used only in this test class (and should stay that way)
    so that we can use the manager in a parallel test suite.'''
model = Node
nesting = 2
@classmethod
def after_setup(cls):
return create(cls, nesting=cls.nesting)
def test_meta(self):
all = yield self.query().load_related('parent').all()
for n in all:
if n.parent:
self.assertTrue(isinstance(n.parent, self.model))
def test_related_cache(self):
all = yield self.query().all()
pcache = self.model._meta.dfields['parent'].get_cache_name()
for n in all:
self.assertFalse(hasattr(n, pcache))
yield self.multi_async((n.parent for n in all))
for n in all:
self.assertTrue(hasattr(n, pcache))
self.assertEqual(getattr(n, pcache), n.parent)
def test_self_related(self):
query = self.query()
root = yield query.get(parent=None)
children = yield root.children.query().load_related('parent').all()
self.assertTrue(children)
for child in children:
self.assertEqual(child.parent, root)
children2 = yield child.children.query().load_related('parent').all()
self.assertTrue(children2)
for child2 in children2:
self.assertEqual(child2.parent, child)
def test_self_related_filter_on_self(self):
query = self.query()
# We should get the nodes just after the root
root = yield query.get(parent=None)
qs = yield query.filter(parent__parent=None).load_related('parent').all()
self.assertTrue(qs)
for node in qs:
self.assertEqual(node.parent, root)
class TestDeleteSelfRelated(test.TestWrite):
model = Node
nesting = 2
def setUp(self):
return create(self, nesting=self.nesting)
def test_related_delete_all(self):
all = yield self.query().all()
self.assertTrue(all)
root = 0
for a in all:
if a.parent is None:
root += 1
self.assertEqual(root, 1)
yield self.query().delete()
yield self.async.assertEqual(self.query().count(), 0)
def test_related_root_delete(self):
qs = self.query().filter(parent=None)
yield qs.delete()
yield self.async.assertEqual(self.query().count(), 0)
def test_related_filter_delete(self):
query = self.query()
root = yield query.get(parent=None)
self.assertFalse(root.parent)
qs = query.filter(parent=root)
yield qs.delete()
query = self.query()
yield self.async.assertEqual(query.count(), 1)
qs = yield query.all()
self.assertEqual(query[0], root)
class TestRealtedQuery(FinanceTest):
@classmethod
def after_setup(cls):
return cls.data.makePositions(cls)
def test_related_filter(self):
query = self.query(Position)
# fetch all position with EUR instruments
instruments = self.query(Instrument).filter(ccy='EUR')
peur1 = yield self.query(Position).filter(instrument=instruments)\
.load_related('instrument').all()
self.assertTrue(peur1)
for p in peur1:
self.assertEqual(p.instrument.ccy,'EUR')
peur = self.query(Position).filter(instrument__ccy='EUR')
qe = peur.construct()
self.assertEqual(qe._get_field, None)
self.assertEqual(len(qe),1)
self.assertEqual(qe.keyword, 'set')
peur = yield peur.all()
self.assertEqual(set(peur), set(peur1))
def test_related_exclude(self):
query = self.query(Position)
peur = yield query.exclude(instrument__ccy='EUR').load_related('instrument').all()
self.assertTrue(peur)
for p in peur:
self.assertNotEqual(p.instrument.ccy, 'EUR')
def test_load_related_model(self):
position = yield self.query(Position).get(id=1)
self.assertTrue(position.instrument_id)
cache = position.get_field('instrument').get_cache_name()
self.assertFalse(hasattr(position, cache))
instrument = yield position.load_related_model('instrument',
load_only=('ccy',))
self.assertTrue(isinstance(instrument, Instrument))
self.assertEqual(instrument._loadedfields, ('ccy',))
self.assertEqual(id(instrument), id(position.instrument))
def test_related_manager(self):
session = self.session()
fund = yield session.query(Fund).get(id=1)
positions1 = yield session.query(Position).filter(fund=fund).all()
positions = yield fund.positions.query().load_related('fund').all()
self.assertTrue(positions)
for p in positions:
self.assertEqual(p.fund, fund)
self.assertEqual(set(positions1), set(positions))
def test_related_manager_exclude(self):
inst = yield self.query().get(id=1)
fund = yield self.query(Fund).get(id=1)
pos = yield fund.positions.exclude(instrument=inst).load_related('instrument')\
.load_related('fund').all()
for p in pos:
self.assertNotEqual(p.instrument, inst)
self.assertEqual(p.fund, fund)
| bsd-3-clause | -5,532,104,236,342,709,000 | 36.7 | 90 | 0.591512 | false |
nijel/weblate | setup.py | 1 | 5078 | #!/usr/bin/env python3
#
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
from distutils import log
from distutils.command.build import build
from distutils.core import Command
from distutils.dep_util import newer
from glob import glob
from itertools import chain
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py
from translate.tools.pocompile import convertmo
LOCALE_MASKS = [
"weblate/locale/*/LC_MESSAGES/*.po",
]
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open("README.rst") as readme:
README = readme.read()
with open("requirements.txt") as requirements:
REQUIRES = requirements.read().splitlines()
EXTRAS = {"all": []}
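# requirements-optional.txt is split into "# <section>" blocks; each dependency
# becomes an extra named after its section, and everything except MySQL is also
# added to the catch-all "all" extra.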
with open("requirements-optional.txt") as requirements:
section = None
for line in requirements:
line = line.strip()
if line.startswith("-r") or not line:
continue
if line.startswith("#"):
section = line[2:]
else:
dep = line.split(";")[0].strip()
EXTRAS[section] = dep
if section != "MySQL":
EXTRAS["all"].append(dep)
class WeblateBuildPy(build_py):
def find_package_modules(self, package, package_dir):
"""Filter settings.py from built module."""
result = super().find_package_modules(package, package_dir)
return [item for item in result if item[2] != "weblate/settings.py"]
class BuildMo(Command):
description = "update MO files to match PO"
user_options = []
def initialize_options(self):
self.build_base = None
def finalize_options(self):
self.set_undefined_options("build", ("build_base", "build_base"))
def run(self):
for name in chain.from_iterable(glob(mask) for mask in LOCALE_MASKS):
output = os.path.splitext(name)[0] + ".mo"
if not newer(name, output):
continue
self.announce(f"compiling {name} -> {output}", level=log.INFO)
with open(name, "rb") as pofile, open(output, "wb") as mofile:
convertmo(pofile, mofile, None)
class WeblateBuild(build):
"""Override the default build with new subcommands."""
# The build_mo has to be before build_data
sub_commands = [("build_mo", lambda self: True)] + build.sub_commands
setup(
name="Weblate",
version="4.5.2",
python_requires=">=3.6",
packages=find_packages(),
include_package_data=True,
description=(
"A web-based continuous localization system with "
"tight version control integration"
),
long_description=README,
long_description_content_type="text/x-rst",
license="GPLv3+",
keywords="i18n l10n gettext git mercurial translate",
url="https://weblate.org/",
download_url="https://weblate.org/download/",
project_urls={
"Issue Tracker": "https://github.com/WeblateOrg/weblate/issues",
"Documentation": "https://docs.weblate.org/",
"Source Code": "https://github.com/WeblateOrg/weblate",
"Twitter": "https://twitter.com/WeblateOrg",
},
author="Michal Čihař",
author_email="[email protected]",
install_requires=REQUIRES,
zip_safe=False,
extras_require=EXTRAS,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Internationalization",
"Topic :: Software Development :: Localization",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
entry_points={"console_scripts": ["weblate = weblate.runner:main"]},
cmdclass={"build_py": WeblateBuildPy, "build_mo": BuildMo, "build": WeblateBuild},
)
| gpl-3.0 | 7,299,572,486,727,412,000 | 34.229167 | 86 | 0.650108 | false |
michael-hart/tactile_tablet | src/tablet.py | 1 | 1195 | # File created 26/01/2015
# Contains main method to write Braille output to a tablet
# Pins are [12, 16, 18, 22, 24, 26] in GPIO.BOARD mode
import RPi.GPIO as GPIO
import time
import atexit
from braille_converter import convert_string
from braille_dict import braille_dict as bdict
led_pins = [12, 16, 18, 22, 24, 26]
def main():
tablet_columns = 2
tablet_rows = 3
leftover_buffer = []
# Set up GPIO
GPIO.setmode(GPIO.BOARD)
for pin in led_pins:
GPIO.setup(pin, GPIO.OUT)
atexit.register(cleanup)
print "Enter sentences for Braille display"
while True:
display_str = raw_input('-> ')
word_buffer = convert_string(display_str)
word_buffer = leftover_buffer + word_buffer
        line_buffer, leftover_buffer = fit_to_screen(word_buffer, tablet_columns, tablet_rows)
# TODO: Output line_buffer to display
def fit_to_screen(words, cols, rows):
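    # Greedily pack whole words (each followed by a one-cell space) onto the
    # display's rows; whatever does not fit is returned as leftover for the
    # next refresh.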
leftover = list(words)
lines = []
for i in range(rows):
lines.append([])
        while leftover and len(lines[i]) + len(leftover[0]) + 1 < cols:
lines[i] += leftover[0] + bdict[' ']
leftover = leftover[1:]
return lines, leftover
def cleanup():
print "Cleaning up..."
GPIO.cleanup()
if __name__ == '__main__':
main()
| gpl-2.0 | -7,119,182,581,759,252,000 | 22.431373 | 105 | 0.687029 | false |
zmr/namsel | edit_distance_tests/generate_accuracy_report.py | 1 | 5481 | #encoding: utf-8
import os
import sys
import glob
import re
import codecs
from difflib import HtmlDiff
from recognize import run_main
import Levenshtein as L
import requests
import datetime
import multiprocessing
from config_manager import Config, run_all_confs_for_page
LOGIN_URL = 'https://dhattupages.appspot.com/accounts/login/?next=/'
PW = 'dartsedolhagangdege7'
credentials = {'username':'zach', 'password':PW}
HD = HtmlDiff()
test_vols = ['sample_book6', 'sample_book5', 'sample_book4', 'sample_book3',
'sample_book2', 'sample_book1', 'ldong-yon-tan-rgya-mtsho',
'don-grub-rgyal', 'taranatha']
test_vols.sort()
style_old = ''' <style type="text/css">
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
</style>'''
style_new = ''' <style type="text/css">
table.diff {font-family:Courier; border:medium;}
.diff_header {background-color:#e0e0e0}
td.diff_header {text-align:right}
.diff_next {background-color:#c0c0c0}
.diff_add {background-color:#aaffaa}
.diff_chg {background-color:#ffff77}
.diff_sub {background-color:#ffaaaa}
tr {line-height: 40px;}
td {font-family: "Qomolangma-Uchen Sarchung" !important}
</style>'''
multiple_spaces = re.compile(ur'[ \t]{1,}')
pwd = os.getcwd()
def open(fl, mode):
return codecs.open(fl, mode, 'utf-8')
def _normalize_input(txt):
# Strip lines of extra whitespace
lines = txt.split('\n')
lines = [l.strip() for l in lines if l.strip()]
# remove top title line
lines = lines[1:]
txt = '\n'.join(lines)
# collapse multiple spaces to 1 space
txt = multiple_spaces.sub(' ', txt)
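    # Normalize Tibetan punctuation and bracket variants so the manual
    # transcription and the OCR output use the same code points before comparison.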
txt = txt.replace(u'༎', u'།།')
txt = txt.replace(u'<', u'〈')
txt = txt.replace(u'>', u'〉')
txt = txt.replace(u'༑', u'་།་')
txt = txt.replace(u'-', u'—')
return txt
def _make_html_diff(txt, ocr):
html = HD.make_file(txt.split('\n'), ocr.split('\n'))
html = html.replace(style_old, style_new)
html = html.replace('ISO-8859-1', 'utf-8')
html = html.replace('<tbody>\n', '<tbody>\n<tr><td></td><td></td><td>Manual input</td><td></td><td></td><td>OCR</td></tr>\n')
# print html
return html
def _get_compare_data(tif_txt_pair):
tif = tif_txt_pair[0]
txt = tif_txt_pair[1]
if tif[:-4] == txt[:-4]: # This should always be true
# ocr = run_main(tif, conf=Config(path='/home/zr/letters/conf/443cf9ec-76c7-44bc-95ad-593138d2d5fc.conf'), text=True)
# ocr = run_main(tif, conf=Config(segmenter='stochastic', recognizer='hmm', break_width=3.6), text=True)
ocr = run_main(tif, text=True)
# ocr = run_all_confs_for_page(tif, text = True)
ocr = ocr.strip()
txt = open(txt,'r').read()
txt = _normalize_input(txt)
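        # Levenshtein distance counts character edits; ratio is a normalized
        # similarity in [0, 1], where 1.0 means the OCR output matches the
        # ground-truth text exactly.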
edit_dist = L.distance(txt, ocr)
edit_ratio = L.ratio(txt, ocr)
html = _make_html_diff(txt, ocr)
# sys.exit()
data = {'edit_distance': edit_dist,
'edit_ratio': edit_ratio,
'filename': os.path.basename(tif),
'html': html
}
return data
def do_pairwise_comparison(origflpath, ocrflpath):
o = open(origflpath, 'r').read()
s = open(ocrflpath, 'r').read()
s = _normalize_input(s)
return L.ratio(o,s)
#data = {'csrfmiddlewaretoken':s.cookies['csrftoken'],
# 'edit_distance': edit_dist,
# 'filename': os.path.basename(tif),
# 'sample_set': t, 'html': html, 'timestamp': timestamp,
# 'comment': comment
# }
if __name__ == '__main__':
from sklearn.externals.joblib import Parallel, delayed
timestamp = datetime.datetime.now()
comment = raw_input('Comment: ')
for t in test_vols:
os.chdir(os.path.abspath(t))
tifs = glob.glob('*tif')
txts = glob.glob('*txt')
tifs.sort()
txts.sort()
pool = multiprocessing.Pool()
# all_data = Parallel(n_jobs=12)(delayed(_get_compare_data)(i) for i in zip(tifs, txts))
all_data = pool.map(_get_compare_data, zip(tifs, txts))
# all_data = []
# for i in zip(tifs, txts):
# all_data.append(_get_compare_data(i))
with requests.session() as s:
s.get(LOGIN_URL)
credentials['csrfmiddlewaretoken'] = s.cookies['csrftoken']
s.post(LOGIN_URL, data=credentials,
headers={'Referer': 'https://dhattupages.appspot.com/'},
allow_redirects=True)
print 'posting data for ', t
for data in all_data:
data['csrfmiddlewaretoken'] = s.cookies['csrftoken']
data['sample_set'] = t
data['timestamp'] = timestamp
data['comment'] = comment
r = s.post('https://dhattupages.appspot.com/test-data-update',
headers={'Referer': 'https://dhattupages.appspot.com/'},
data=data)
r.raise_for_status()
os.chdir(pwd) | mit | 8,027,859,567,957,796,000 | 31.903614 | 129 | 0.564366 | false |
rafin/Spotify-Visualizations | engine/views.py | 1 | 3163 | from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from urllib import unquote
#spotify tools
from spot import pl
from spot import keys
import json
from json import loads as dict #converts json back to dictionary
#generate serializer for retrieving db data
from django.core import serializers
json_serializer = serializers.get_serializer("json")()
import models
from django.views.decorators.csrf import ensure_csrf_cookie
@ensure_csrf_cookie
def index(request):
s_auth_url = keys.auth_url(1)
p_auth_url = keys.auth_url(0)
return render_to_response('index.html', {'p_auth_url': p_auth_url, 's_auth_url': s_auth_url})
def plot(request, token, username):
return render_to_response('plot.html', {'token': token, 'name': username})
def sift(request, token, username):
return render_to_response('sift.html', {'token': token, 'name': username})
def getsongs(request):
'''returns json response of given playlist title'''
username = request.GET.get('username', '')
title = unquote(request.GET.get('title', ''))
token = request.GET.get('token','')
#if title is a list of titles instead of just 1
if '~[' in title:
titles = title.split('~[')
songs = []
for title in titles:
songs += pl.pl_data(title, username, token)['songs']
songs = {"songs":songs}
else:
songs = pl.pl_data(title, username, token)
#json_songs = json_serializer.serialize(songs, ensure_ascii=True)
return JsonResponse(songs, safe=False )
def getplaylists(request):
'''returns json response of given playlist title'''
#playlists = models.Playlist.objects.all()
username = request.GET.get('username', '')
token = request.GET.get('token', '')
playlists = pl.get_playlists(username, token)
#json_playlists = json_serializer.serialize(playlists, ensure_ascii=True)
return JsonResponse(playlists, safe=False)
def newplaylist(request):
if request.is_ajax():
if request.method == 'POST':
title = request.POST.get("title","")
songs = request.POST.get("songs","")
songs = songs[1:-1]
songs = songs.replace('"', '')
#reauthorize and get username
token = request.POST.get("token","")
sp = keys.get_access(token)
username = sp.current_user()['id']
pl.new_playlist(title, songs)
return JsonResponse({"success":"yes"})
def authorize_plot(request):
code = request.GET.get('code', '')
token = keys.get_token(code, 0)
#get username
sp = keys.get_access(token)
username = sp.current_user()['id']
url = reverse('plot', args=(), kwargs={'token': token, 'username': username})
return HttpResponseRedirect(url)
def authorize_sift(request):
code = request.GET.get('code', '')
token = keys.get_token(code, 1)
#get username
sp = keys.get_access(token)
username = sp.current_user()['id']
url = reverse('sift', args=(), kwargs={'token': token, 'username': username})
return HttpResponseRedirect(url)
| mit | -6,362,495,189,862,798,000 | 33.010753 | 97 | 0.654758 | false |
murgatroid99/grpc | src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py | 1 | 7918 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test making many calls and immediately cancelling most of them."""
import threading
import unittest
from grpc._cython import cygrpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
_EMPTY_FLAGS = 0
_EMPTY_METADATA = ()
_SERVER_SHUTDOWN_TAG = 'server_shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TAG = 'receive_close_on_server'
_RECEIVE_MESSAGE_TAG = 'receive_message'
_SERVER_COMPLETE_CALL_TAG = 'server_complete_call'
_SUCCESS_CALL_FRACTION = 1.0 / 8.0
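# Only this fraction of the concurrent RPCs is expected to complete normally;
# the remainder are cancelled while still in flight.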
class _State(object):
def __init__(self):
self.condition = threading.Condition()
self.handlers_released = False
self.parked_handlers = 0
self.handled_rpcs = 0
def _is_cancellation_event(event):
return (event.tag is _RECEIVE_CLOSE_ON_SERVER_TAG and
event.batch_operations[0].received_cancelled)
class _Handler(object):
def __init__(self, state, completion_queue, rpc_event):
self._state = state
self._lock = threading.Lock()
self._completion_queue = completion_queue
self._call = rpc_event.call
def __call__(self):
with self._state.condition:
self._state.parked_handlers += 1
if self._state.parked_handlers == test_constants.THREAD_CONCURRENCY:
self._state.condition.notify_all()
while not self._state.handlers_released:
self._state.condition.wait()
with self._lock:
self._call.start_server_batch(
(cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
_RECEIVE_CLOSE_ON_SERVER_TAG)
self._call.start_server_batch(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
_RECEIVE_MESSAGE_TAG)
first_event = self._completion_queue.poll()
if _is_cancellation_event(first_event):
self._completion_queue.poll()
else:
with self._lock:
operations = (
cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.SendMessageOperation(b'\x79\x57', _EMPTY_FLAGS),
cygrpc.SendStatusFromServerOperation(
_EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
_EMPTY_FLAGS),
)
self._call.start_server_batch(operations,
_SERVER_COMPLETE_CALL_TAG)
self._completion_queue.poll()
self._completion_queue.poll()
def _serve(state, server, server_completion_queue, thread_pool):
for _ in range(test_constants.RPC_CONCURRENCY):
call_completion_queue = cygrpc.CompletionQueue()
server.request_call(call_completion_queue, server_completion_queue,
_REQUEST_CALL_TAG)
rpc_event = server_completion_queue.poll()
thread_pool.submit(_Handler(state, call_completion_queue, rpc_event))
with state.condition:
state.handled_rpcs += 1
if test_constants.RPC_CONCURRENCY <= state.handled_rpcs:
state.condition.notify_all()
server_completion_queue.poll()
class _QueueDriver(object):
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def events(self, at_least):
with self._condition:
while len(self._events) < at_least:
self._condition.wait()
return tuple(self._events)
class CancelManyCallsTest(unittest.TestCase):
def testCancelManyCalls(self):
server_thread_pool = logging_pool.pool(
test_constants.THREAD_CONCURRENCY)
server_completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server([
(
b'grpc.so_reuseport',
0,
),
])
server.register_completion_queue(server_completion_queue)
port = server.add_http2_port(b'[::]:0')
server.start()
channel = cygrpc.Channel('localhost:{}'.format(port).encode(), None)
state = _State()
server_thread_args = (
state,
server,
server_completion_queue,
server_thread_pool,
)
server_thread = threading.Thread(target=_serve, args=server_thread_args)
server_thread.start()
client_condition = threading.Condition()
client_due = set()
client_completion_queue = cygrpc.CompletionQueue()
client_driver = _QueueDriver(client_condition, client_completion_queue,
client_due)
client_driver.start()
with client_condition:
client_calls = []
for index in range(test_constants.RPC_CONCURRENCY):
client_call = channel.create_call(None, _EMPTY_FLAGS,
client_completion_queue,
b'/twinkies', None, None)
operations = (
cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.SendMessageOperation(b'\x45\x56', _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
tag = 'client_complete_call_{0:04d}_tag'.format(index)
client_call.start_client_batch(operations, tag)
client_due.add(tag)
client_calls.append(client_call)
with state.condition:
while True:
if state.parked_handlers < test_constants.THREAD_CONCURRENCY:
state.condition.wait()
elif state.handled_rpcs < test_constants.RPC_CONCURRENCY:
state.condition.wait()
else:
state.handlers_released = True
state.condition.notify_all()
break
client_driver.events(
test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
with client_condition:
for client_call in client_calls:
client_call.cancel()
with state.condition:
server.shutdown(server_completion_queue, _SERVER_SHUTDOWN_TAG)
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | 6,177,281,417,734,686,000 | 35.827907 | 80 | 0.575272 | false |
trthanhquang/bus-assistant | webApp/getBusTiming.py | 1 | 2827 | #!/usr/bin/env python
import urllib2
from bs4 import BeautifulSoup as BS
import re
import time
def getAgenciesList():
agenciesList_req = urllib2.Request('''http://services.my511.org/Transit2.0/GetAgencies.aspx?token=aeeb38de-5385-482a-abde-692dfb2769e3''')
xml_resp = urllib2.urlopen(agenciesList_req)
soup = BS(xml_resp.read(),'lxml')
print soup.prettify()
agencies = soup.find_all('agency')
for a in agencies:
print a['name']
def getBusList(busCodes):
api_url = '''http://services.my511.org/Transit2.0/GetRoutesForAgencies.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&agencyNames=SF-MUNI'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
routes = soup.find_all('route')
for route in routes:
if route['code'] in busCodes:
print route.prettify()
def getBusStopsList():
api_url = '''http://services.my511.org/Transit2.0/GetStopsForRoute.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&routeIDF=SF-MUNI~8X~Inbound'''
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
print soup.prettify()
def getNextDepartures(stopcode,buscode):
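    # Ask the 511.org Transit API for the upcoming departure times (in minutes)
    # of a single route at a single stop, print them, and return the list.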
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
# print soup.prettify()
route = soup.find('route',{'code':buscode})
l = route.departuretimelist.getText().split()
if l:
print '-- %s\t%s (mins)'%(buscode,', '.join(l))
else:
print '-- %s\tUnavailable'%buscode
return l
class busTime:
def __init__(self,busCode,busTime=[]):
self.busCode = busCode #String
self.busTime = busTime #List of String
def __str__(self):
return self.busCode
class busStopStatus:
def __init__(self,stopcode,description="",departureList=[]):
self.stopcode = stopcode
self.description = description
self.departureList = departureList
def getBusStopStatus(stopcode):
api_url = '''http://services.my511.org/Transit2.0/
GetNextDeparturesByStopCode.aspx
?token=aeeb38de-5385-482a-abde-692dfb2769e3
&stopcode=%s'''%stopcode
req = urllib2.urlopen(''.join(api_url.split()))
soup = BS(req.read(),'lxml')
description = soup.find('stop')['name']
status = busStopStatus(stopcode,description,[])
for bus in soup.find_all('route'):
departtime = busTime(bus['code'],[])
timeList = bus.departuretimelist.getText().split()
if timeList:
print '-- %s\t%s (mins)'%(bus['code'],', '.join(timeList))
for t in timeList:
departtime.busTime.append(t)
status.departureList.append(departtime)
else:
print '-- %s\tUnavailable'%bus['code']
return status
if __name__ == '__main__':
print 'BUS TIMING... :D\n'
print time.ctime(time.time())
getBusStopStatus(16367) | mit | 6,510,849,392,254,107,000 | 27 | 139 | 0.694022 | false |
NYUEcon/NYUecondata | psid/psid.py | 1 | 12753 | """
Working with PSID in python
@author : Spencer Lyon <[email protected]>
@date : 2015-02-04 09:02:56
use the read_csv option `usecols` to only keep what we need
"""
import re
import os
import gc
import os.path
import zipfile
import requests
import lxml.html
import numpy as np
import pandas as pd
# ----------- #
# Downloading #
# ----------- #
# Define lookup that maps years into request numbers.
file_year = map(str, list(range(1968, 1998)) + list(range(1999, 2012, 2)))
request_numbers = map(str, ([1056] + list(range(1058, 1083)) +
list(range(1047, 1052)) +
[1040, 1052, 1132, 1139, 1152, 1156]))
file_lookup = dict(zip(file_year, request_numbers))
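# The cross-year individual file gets its own key ("ind") and request number.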
file_lookup["ind"] = "1053"
def start_psid_session(user=None, password=None):
"""
Use user supplied login details to log in to umich site for PSID
download
"""
login_url = "http://simba.isr.umich.edu/u/Login.aspx"
# start html session so we can log in
session = requests.session()
start = session.get(login_url)
html = start.text
root = lxml.html.fromstring(html)
# Stuff so we can log in
EVAL = root.xpath('//input[@name="__EVENTVALIDATION"]')[0].attrib['value']
VIEWSTATE = root.xpath('//input[@name="__VIEWSTATE"]')[0].attrib['value']
acc_pwd = {'ctl00$ContentPlaceHolder1$Login1$UserName': user,
'ctl00$ContentPlaceHolder1$Login1$Password': password,
'ctl00$ContentPlaceHolder1$Login1$LoginButton': 'Log In',
'__EVENTTARGET': '',
'__EVENTARGUMENT': '',
'__VIEWSTATE': VIEWSTATE,
'__EVENTVALIDATION': EVAL}
# Send login message to PSID site
session.post(login_url, data=acc_pwd)
# Check for login
z = session.get('http://simba.isr.umich.edu/data/data.aspx')
tf2 = 'Logout' in str(z.content)
print('Successful login: %s' % (tf2))
return session
# Function to download PSID zip file
def download_psid(number, local_filename, session):
"""
Download a zip file form the PSID and save to local_filename
"""
request_start = 'http://simba.isr.umich.edu/Zips/GetFile.aspx?file='
# Get the file using requests
r = session.get(request_start + number, stream=True)
with open(local_filename, 'wb') as f:
# Write it out in chunks incase it's big
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return local_filename
# Extracting PSID using psid_unzip.
def psid_unzip(filename, extractall=False):
zfile = zipfile.ZipFile(filename)
def keep_file(n):
if extractall:
return True
else:
return ".sas" in name or ".txt" in name or ".pdf" in name
for name in zfile.namelist():
# Only take out the files we want
if keep_file(name):
(dirname, filename) = os.path.split(name)
if ".pdf" in name: # Different directory for Codebooks
dirname = dirname + "Codebooks"
if ".txt" in name:
nascii = name # Keep track of ascii name
if ".sas" in name:
nsas = name # Keep track of sas name
print("Decompressing %s on %s" % (filename, dirname))
if dirname != '':
if not os.path.exists(dirname):
os.makedirs(dirname)
zfile.extract(name, dirname) # Extract file
return (nsas, nascii)
def sascii2csv(sas_name, ascii_name, csv_name, remove_orig=True):
"""
Read in ascii data from SAS commands and write out csv
"""
# Open sas file
x = open(sas_name, "r")
dat = x.read()
dat_split = dat.split('\n')
# RE for variable designation
re_var = "^\s*(?P<variable>\S+)\s+"
# RE for variable label
re_label = '[(LABEL)(label)]\s*=\s*"(?P<label>[^"]+)"'
# RE for variable format
re_format = "[(FORMAT)(format)]\s*=\s*(?P<format>\S+)\s"
# RE for variable position
re_length = "\s*(?P<length1>\d*)\s*-\s*(?P<length2>\d*)\s*"
meta = []
for dstr in dat_split:
res_var = re.search(re_var, dstr) # Find variable name in line
res_label = re.search(re_label, dstr) # Find variable label
res_format = re.search(re_format, dstr) # Find variable format
if not (res_var is None or res_label is None or res_format is None):
# Now that we have a verified variable name...
# Find position RE
counts = re.search(res_var.group("variable")+re_length, dat)
l1 = int(counts.group("length1")) # Grab out first position
l2 = int(counts.group("length2")) # Grab out second position
# Add to meta data
meta += [{"variable": res_var.group("variable"),
"label": res_label.group("label"),
"format": res_format.group("format"),
"l1": l1,
"l2": l2,
"l3": l2 - l1 + 1}]
# Get relevant descriptions
names = [z["label"] for z in meta]
lengths = [z["l3"] for z in meta]
del meta
# Use numpy to read fixed width file and write as .csv
data = np.genfromtxt(ascii_name, names=names, delimiter=lengths)
np.savetxt(csv_name, data, delimiter=',',
header=','.join(data.dtype.names))
del data
if remove_orig:
os.remove(sas_name)
os.remove(ascii_name)
def download_unzip_csv_psid(f_name, request_num, session, to_csv=True,
remove_orig=True, verbose=True):
"""
Download a family data set
"""
# Download zip file
if verbose:
print("Downloading %s" % f_name)
x = download_psid(str(request_num), f_name, session)
# Unzip
if verbose:
print("Unzipping %s" % f_name)
sas_name, ascii_name = psid_unzip(f_name)
if to_csv:
if verbose:
print("Converting %s to csv" % ascii_name)
# generate csv_name and convert to csv
csv_name = f_name.strip(".zip") + ".csv"
sascii2csv(sas_name, ascii_name, csv_name, remove_orig=remove_orig)
if remove_orig:
os.remove(f_name)
gc.collect()
def download_all_family_data(session, to_csv=True, **kwargs):
"""
Download all family data sets
"""
    family_lookup = file_lookup.copy()
    family_lookup.pop("ind")
    for (fy, rn) in family_lookup.items():
fn = "FAM" + fy + ".zip"
download_unzip_csv_psid(fn, rn, session, to_csv=to_csv, **kwargs)
return
def download_ind_cross_year(session, to_csv=True, **kwargs):
"""
Download the cross year individual file
"""
download_unzip_csv_psid("IND2011ER.zip", str(1053), session,
to_csv=to_csv, **kwargs)
return
def download_parentfile(session, to_csv=True, **kwargs):
"""
Download the cross year individual file
"""
download_unzip_csv_psid("PID2011ER.zip", str(1123), session,
to_csv=to_csv, **kwargs)
return
def download_all_data(session, to_csv=True, **kwargs):
"""
Call the download ind and download all family functions
"""
download_ind_cross_year(session, to_csv=True, **kwargs)
download_all_family_data(session, to_csv=True, **kwargs)
return
# -------- #
# Cleaning #
# -------- #
def clean_indfile_names(df):
"""
Most of the columns in the PSID individual file have many
underscores in between the variable name and the year. The next few
lines remove those cases and re- assigns the column names.
This is necessary for us to save that data to hdf in table format
"""
cols = pd.Series(df.columns, dtype=str)
c2 = cols.str.extract("(.+?)__+(\d\d)")
cols2 = c2[0] + c2[1]
cols2 = cols2.fillna(cols)
df.cols = cols2
return df
def csv2hdf(csv_fn, hdf_fn, hdf_gn=None, hdf_mode="a",
extra_func=None):
"""
Move the file csv_fn to an HDF file.
Parameters
----------
csv_fn : string
The file name for the csv
hdf_fn: string
The name of the hdf file to write to
hdf_gn: string, optional
A string specifying the `path` to the group to contain the
dataset. If none is given, the data set is saved to `/fn`, where
fn is the root of csv_fn
hdf_mode: string, optional(default="a")
The open mode for the hdf file. Default is append
extra_func: function, optional(default=None)
An extra function the user can supply to clean or otherwise
alter the data set after reading in from csv, but before saving
to hdf
Returns
-------
None
Notes
-----
This function tries to write the data set in table form, but if it
cannot it will fallback to writing in fixed form.
For a discussion on the differences see the pandas manual
"""
df = pd.read_csv(csv_fn)
if extra_func is not None:
df = extra_func(df)
if hdf_gn is None:
# split to path/file then chop last 4 characters off (`.csv`)
hdf_gn = os.path.split(csv_fn)[1][:-4]
try:
df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="table",
complib="blosc")
print("Added %s to %s" % (hdf_gn, hdf_fn))
except:
print("WARN: Couldn't store %s as table. Using fixed" % hdf_gn)
df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="fixed",
complib="blosc")
return
def _convert_to_4_digit_year(yr):
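    # Examples: "5" -> "2005", "07" -> "2007", "75" -> "1975", "1999" -> "1999".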
    print("received yr: %s" % yr)
if len(yr) == 4:
return yr
if len(yr) == 1:
return "200" + yr
if len(yr) == 3:
raise ValueError("Can't parse three digit year")
iy = int(yr)
if 0 <= iy <= 9: # 200x
return "20" + yr
    elif 10 <= iy <= int(str(datetime.datetime.now().year)[2:]):
return "20" + yr
else: # assuming in 1900's
return "19" + yr
if __name__ == '__main__':
import glob
import argparse
import datetime
from textwrap import dedent
d_help = dedent("""\
Download the specified data file. If argument begins with a, all files
will be downloaded. If it begins with i, only the cross-year individual
file will be downloaded. If it is of the form fYY or fYYYY then only the
family file for the given year will be downloaded
""")
# create parser and add arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--download",
help=d_help)
parser.add_argument("--hdf",
help="Convert csv files to hdf named PSID.hdf",
action="store_true")
parser.add_argument("-u", "--username",
help="Specify username for PSID website")
parser.add_argument("-p", "--password",
help="Specify password for PSID website")
args = parser.parse_args()
# Handle download arg
if args.download:
# make sure we have a user_name and password
if args.username is None or args.password is None:
msg = dedent("""\
Must supply username and password. Example syntax:
`python psid.py -u USERNAME -p PASSWORD -d f75 --hdf`
If you don't yet have an account, go to http://simba.isr.umich.edu
and create one
""")
raise ValueError(msg)
a = args.download
session = start_psid_session(user=args.username,
password=args.password)
if a.startswith("a"): # download all
download_all_data(session)
elif a.startswith("i"): # download individual file
download_ind_cross_year(session, to_csv=True)
elif a.startswith("p"): # download parent id file
download_parentfile(session, to_csv=True)
else:
# download single family file
m = re.match("f?(\d+)", a.lower())
if m is not None:
yr = m.groups()[0]
yr = _convert_to_4_digit_year(yr)
rn = file_lookup[yr]
fn = "FAM" + yr + ".zip"
download_unzip_csv_psid(fn, rn, session, to_csv=True)
else:
raise ValueError("Could not parse download option")
# Handle hdf arg
if args.hdf:
fnames = glob.glob("./*.csv") # get csv file names.
fnames.sort(reverse=True) # Sorting to put IND file at top
for f in fnames:
if f.lower().startswith("ind"):
csv2hdf(f, "PSID.hdf", extra_func=clean_indfile_names)
else:
csv2hdf(f, "PSID.hdf")
| mit | -7,739,318,489,776,719,000 | 28.183066 | 78 | 0.57312 | false |
Ry09/Python-projects | Programs & Challenges from Python for beginners book/Chapter 2/wordProblems.py | 1 | 1208 | #Program using word problems to demonstrate math and number operators
print("If a 2000 pound pregnant hippo gives birth to a 100 pound calf,")
print("but then eats 50 pounds of food, how much does she weigh?")
input("Press the enter key to find out.")
print("2000 - 100 + 50 = ", 2000 - 100 + 50)
print("\nIf an adventurer returns from a successful quest and buys each of")
print("6 companions 3 bottles of ale, how many bottles are purchased?")
input("Press the enter key to find out")
print("6 * 3 = ",6 * 3)
print("\nIf a restaurant check comes to 19 dollars with tip, and you and")
print("your friends split it evenly 4 ways, how much do each of you throw in?")
input("Press the enter key to find out.")
print("19 / 4 = ", 19 / 4)
print("\nIf a group of 4 pirates finds a chest full of 107 gold coins, and")
print("they divide the booty evenly, how many whole coins does each get?")
input("Press the enter key to find out.")
print("107 // 4 = ", 107 // 4)
print("\nIf the same group of 4 pirates evenly divides the chest full")
print("of 107 gold coins, how many coins are left over?")
input("Press the enter key to find out.")
print("107 % 4 = ", 107 % 4)
input("\n\nPress the enter key to exit")
| mit | -6,853,118,525,491,717,000 | 42.142857 | 79 | 0.70447 | false |
clebergnu/autotest | server/server_job.py | 1 | 45482 | """
The main job wrapper for the server side.
This is the core infrastructure. Derived from the client side job.py
Copyright Martin J. Bligh, Andy Whitcroft 2007
"""
import getpass, os, sys, re, stat, tempfile, time, select, subprocess, platform
import traceback, shutil, warnings, fcntl, pickle, logging, itertools, errno
from autotest_lib.client.bin import sysinfo
from autotest_lib.client.common_lib import base_job
from autotest_lib.client.common_lib import error, log, utils, packages
from autotest_lib.client.common_lib import logging_manager
from autotest_lib.server import test, subcommand, profilers
from autotest_lib.server.hosts import abstract_ssh
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
def _control_segment_path(name):
"""Get the pathname of the named control segment file."""
server_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(server_dir, "control_segments", name)
CLIENT_CONTROL_FILENAME = 'control'
SERVER_CONTROL_FILENAME = 'control.srv'
MACHINES_FILENAME = '.machines'
CLIENT_WRAPPER_CONTROL_FILE = _control_segment_path('client_wrapper')
CRASHDUMPS_CONTROL_FILE = _control_segment_path('crashdumps')
CRASHINFO_CONTROL_FILE = _control_segment_path('crashinfo')
INSTALL_CONTROL_FILE = _control_segment_path('install')
CLEANUP_CONTROL_FILE = _control_segment_path('cleanup')
VERIFY_CONTROL_FILE = _control_segment_path('verify')
REPAIR_CONTROL_FILE = _control_segment_path('repair')
# by default provide a stub that generates no site data
def _get_site_job_data_dummy(job):
return {}
class status_indenter(base_job.status_indenter):
"""Provide a simple integer-backed status indenter."""
def __init__(self):
self._indent = 0
@property
def indent(self):
return self._indent
def increment(self):
self._indent += 1
def decrement(self):
self._indent -= 1
def get_context(self):
"""Returns a context object for use by job.get_record_context."""
class context(object):
def __init__(self, indenter, indent):
self._indenter = indenter
self._indent = indent
def restore(self):
self._indenter._indent = self._indent
return context(self, self._indent)
class server_job_record_hook(object):
"""The job.record hook for server job. Used to inject WARN messages from
the console or vlm whenever new logs are written, and to echo any logs
to INFO level logging. Implemented as a class so that it can use state to
block recursive calls, so that the hook can call job.record itself to
log WARN messages.
Depends on job._read_warnings and job._logger.
"""
def __init__(self, job):
self._job = job
self._being_called = False
def __call__(self, entry):
"""A wrapper around the 'real' record hook, the _hook method, which
prevents recursion. This isn't making any effort to be threadsafe,
the intent is to outright block infinite recursion via a
job.record->_hook->job.record->_hook->job.record... chain."""
if self._being_called:
return
self._being_called = True
try:
self._hook(self._job, entry)
finally:
self._being_called = False
@staticmethod
def _hook(job, entry):
"""The core hook, which can safely call job.record."""
entries = []
# poll all our warning loggers for new warnings
for timestamp, msg in job._read_warnings():
warning_entry = base_job.status_log_entry(
'WARN', None, None, msg, {}, timestamp=timestamp)
entries.append(warning_entry)
job.record_entry(warning_entry)
# echo rendered versions of all the status logs to info
entries.append(entry)
for entry in entries:
rendered_entry = job._logger.render_entry(entry)
logging.info(rendered_entry)
job._parse_status(rendered_entry)
class base_server_job(base_job.base_job):
"""The server-side concrete implementation of base_job.
Optional properties provided by this implementation:
serverdir
conmuxdir
num_tests_run
num_tests_failed
warning_manager
warning_loggers
"""
_STATUS_VERSION = 1
def __init__(self, control, args, resultdir, label, user, machines,
client=False, parse_job='',
ssh_user='root', ssh_port=22, ssh_pass='',
group_name='', tag='',
control_filename=SERVER_CONTROL_FILENAME):
"""
Create a server side job object.
@param control: The pathname of the control file.
@param args: Passed to the control file.
@param resultdir: Where to throw the results.
@param label: Description of the job.
@param user: Username for the job (email address).
@param client: True if this is a client-side control file.
@param parse_job: string, if supplied it is the job execution tag that
the results will be passed through to the TKO parser with.
@param ssh_user: The SSH username. [root]
@param ssh_port: The SSH port number. [22]
@param ssh_pass: The SSH passphrase, if needed.
@param group_name: If supplied, this will be written out as
host_group_name in the keyvals file for the parser.
@param tag: The job execution tag from the scheduler. [optional]
@param control_filename: The filename where the server control file
should be written in the results directory.
"""
super(base_server_job, self).__init__(resultdir=resultdir)
path = os.path.dirname(__file__)
self.control = control
self._uncollected_log_file = os.path.join(self.resultdir,
'uncollected_logs')
debugdir = os.path.join(self.resultdir, 'debug')
if not os.path.exists(debugdir):
os.mkdir(debugdir)
if user:
self.user = user
else:
self.user = getpass.getuser()
self.args = args
self.machines = machines
self._client = client
self.warning_loggers = set()
self.warning_manager = warning_manager()
self._ssh_user = ssh_user
self._ssh_port = ssh_port
self._ssh_pass = ssh_pass
self.tag = tag
self.last_boot_tag = None
self.hosts = set()
self.drop_caches = False
self.drop_caches_between_iterations = False
self._control_filename = control_filename
self.logging = logging_manager.get_logging_manager(
manage_stdout_and_stderr=True, redirect_fds=True)
subcommand.logging_manager_object = self.logging
self.sysinfo = sysinfo.sysinfo(self.resultdir)
self.profilers = profilers.profilers(self)
job_data = {'label' : label, 'user' : user,
'hostname' : ','.join(machines),
'drone' : platform.node(),
'status_version' : str(self._STATUS_VERSION),
'job_started' : str(int(time.time()))}
if group_name:
job_data['host_group_name'] = group_name
# only write these keyvals out on the first job in a resultdir
if 'job_started' not in utils.read_keyval(self.resultdir):
job_data.update(get_site_job_data(self))
utils.write_keyval(self.resultdir, job_data)
self._parse_job = parse_job
self._using_parser = (self._parse_job and len(machines) <= 1)
self.pkgmgr = packages.PackageManager(
self.autodir, run_function_dargs={'timeout':600})
self.num_tests_run = 0
self.num_tests_failed = 0
self._register_subcommand_hooks()
# these components aren't usable on the server
self.bootloader = None
self.harness = None
# set up the status logger
self._indenter = status_indenter()
self._logger = base_job.status_logger(
self, self._indenter, 'status.log', 'status.log',
record_hook=server_job_record_hook(self))
@classmethod
def _find_base_directories(cls):
"""
Determine locations of autodir, clientdir and serverdir. Assumes
that this file is located within serverdir and uses __file__ along
with relative paths to resolve the location.
"""
serverdir = os.path.abspath(os.path.dirname(__file__))
autodir = os.path.normpath(os.path.join(serverdir, '..'))
clientdir = os.path.join(autodir, 'client')
return autodir, clientdir, serverdir
def _find_resultdir(self, resultdir):
"""
Determine the location of resultdir. For server jobs we expect one to
always be explicitly passed in to __init__, so just return that.
"""
if resultdir:
return os.path.normpath(resultdir)
else:
return None
def _get_status_logger(self):
"""Return a reference to the status logger."""
return self._logger
@staticmethod
def _load_control_file(path):
f = open(path)
try:
control_file = f.read()
finally:
f.close()
return re.sub('\r', '', control_file)
def _register_subcommand_hooks(self):
"""
Register some hooks into the subcommand modules that allow us
to properly clean up self.hosts created in forked subprocesses.
"""
def on_fork(cmd):
self._existing_hosts_on_fork = set(self.hosts)
def on_join(cmd):
new_hosts = self.hosts - self._existing_hosts_on_fork
for host in new_hosts:
host.close()
subcommand.subcommand.register_fork_hook(on_fork)
subcommand.subcommand.register_join_hook(on_join)
def init_parser(self):
"""
Start the continuous parsing of self.resultdir. This sets up
the database connection and inserts the basic job object into
the database if necessary.
"""
if not self._using_parser:
return
# redirect parser debugging to .parse.log
parse_log = os.path.join(self.resultdir, '.parse.log')
parse_log = open(parse_log, 'w', 0)
tko_utils.redirect_parser_debugging(parse_log)
# create a job model object and set up the db
self.results_db = tko_db.db(autocommit=True)
self.parser = status_lib.parser(self._STATUS_VERSION)
self.job_model = self.parser.make_job(self.resultdir)
self.parser.start(self.job_model)
# check if a job already exists in the db and insert it if
# it does not
job_idx = self.results_db.find_job(self._parse_job)
if job_idx is None:
self.results_db.insert_job(self._parse_job, self.job_model)
else:
machine_idx = self.results_db.lookup_machine(self.job_model.machine)
self.job_model.index = job_idx
self.job_model.machine_idx = machine_idx
def cleanup_parser(self):
"""
This should be called after the server job is finished
to carry out any remaining cleanup (e.g. flushing any
remaining test results to the results db)
"""
if not self._using_parser:
return
final_tests = self.parser.end()
for test in final_tests:
self.__insert_test(test)
self._using_parser = False
def verify(self):
if not self.machines:
raise error.AutoservError('No machines specified to verify')
if self.resultdir:
os.chdir(self.resultdir)
try:
namespace = {'machines' : self.machines, 'job' : self,
'ssh_user' : self._ssh_user,
'ssh_port' : self._ssh_port,
'ssh_pass' : self._ssh_pass}
self._execute_code(VERIFY_CONTROL_FILE, namespace, protect=False)
except Exception, e:
msg = ('Verify failed\n' + str(e) + '\n' + traceback.format_exc())
self.record('ABORT', None, None, msg)
raise
def repair(self, host_protection):
if not self.machines:
raise error.AutoservError('No machines specified to repair')
if self.resultdir:
os.chdir(self.resultdir)
namespace = {'machines': self.machines, 'job': self,
'ssh_user': self._ssh_user, 'ssh_port': self._ssh_port,
'ssh_pass': self._ssh_pass,
'protection_level': host_protection}
self._execute_code(REPAIR_CONTROL_FILE, namespace, protect=False)
def precheck(self):
"""
perform any additional checks in derived classes.
"""
pass
def enable_external_logging(self):
"""
Start or restart external logging mechanism.
"""
pass
def disable_external_logging(self):
"""
Pause or stop external logging mechanism.
"""
pass
def use_external_logging(self):
"""
Return True if external logging should be used.
"""
return False
def _make_parallel_wrapper(self, function, machines, log):
"""Wrap function as appropriate for calling by parallel_simple."""
is_forking = not (len(machines) == 1 and self.machines == machines)
if self._parse_job and is_forking and log:
def wrapper(machine):
self._parse_job += "/" + machine
self._using_parser = True
self.machines = [machine]
self.push_execution_context(machine)
os.chdir(self.resultdir)
utils.write_keyval(self.resultdir, {"hostname": machine})
self.init_parser()
result = function(machine)
self.cleanup_parser()
return result
elif len(machines) > 1 and log:
def wrapper(machine):
self.push_execution_context(machine)
os.chdir(self.resultdir)
machine_data = {'hostname' : machine,
'status_version' : str(self._STATUS_VERSION)}
utils.write_keyval(self.resultdir, machine_data)
result = function(machine)
return result
else:
wrapper = function
return wrapper
def parallel_simple(self, function, machines, log=True, timeout=None,
return_results=False):
"""
Run 'function' using parallel_simple, with an extra wrapper to handle
the necessary setup for continuous parsing, if possible. If continuous
parsing is already properly initialized then this should just work.
@param function: A callable to run in parallel given each machine.
@param machines: A list of machine names to be passed one per subcommand
invocation of function.
        @param log: If True, output will be written to a subdirectory named
                after each machine.
        @param timeout: Seconds after which the function call should time out.
        @param return_results: If True, a list of the results/exceptions from
                the function called on each machine is returned instead of an
                AutoservError being raised on any error. [default: False]
@raises error.AutotestError: If any of the functions failed.
"""
wrapper = self._make_parallel_wrapper(function, machines, log)
return subcommand.parallel_simple(wrapper, machines,
log=log, timeout=timeout,
return_results=return_results)
def parallel_on_machines(self, function, machines, timeout=None):
"""
@param function: Called in parallel with one machine as its argument.
@param machines: A list of machines to call function(machine) on.
@param timeout: Seconds after which the function call should timeout.
@returns A list of machines on which function(machine) returned
without raising an exception.
"""
results = self.parallel_simple(function, machines, timeout=timeout,
return_results=True)
success_machines = []
for result, machine in itertools.izip(results, machines):
if not isinstance(result, Exception):
success_machines.append(machine)
return success_machines
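    # Illustrative sketch (not from the original source): run a hypothetical
    # per-machine callable everywhere and keep only the machines that did not
    # raise an exception:
    #   def check_machine(machine):          # hypothetical callable
    #       ...                               # anything that may raise
    #   good = job.parallel_on_machines(check_machine, machines)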
_USE_TEMP_DIR = object()
def run(self, cleanup=False, install_before=False, install_after=False,
collect_crashdumps=True, namespace={}, control=None,
control_file_dir=None, only_collect_crashinfo=False):
# for a normal job, make sure the uncollected logs file exists
# for a crashinfo-only run it should already exist, bail out otherwise
created_uncollected_logs = False
if self.resultdir and not os.path.exists(self._uncollected_log_file):
if only_collect_crashinfo:
# if this is a crashinfo-only run, and there were no existing
# uncollected logs, just bail out early
logging.info("No existing uncollected logs, "
"skipping crashinfo collection")
return
else:
log_file = open(self._uncollected_log_file, "w")
pickle.dump([], log_file)
log_file.close()
created_uncollected_logs = True
# use a copy so changes don't affect the original dictionary
namespace = namespace.copy()
machines = self.machines
if control is None:
if self.control is None:
control = ''
else:
control = self._load_control_file(self.control)
if control_file_dir is None:
control_file_dir = self.resultdir
self.aborted = False
namespace['machines'] = machines
namespace['args'] = self.args
namespace['job'] = self
namespace['ssh_user'] = self._ssh_user
namespace['ssh_port'] = self._ssh_port
namespace['ssh_pass'] = self._ssh_pass
test_start_time = int(time.time())
if self.resultdir:
os.chdir(self.resultdir)
# touch status.log so that the parser knows a job is running here
open(self.get_status_log_path(), 'a').close()
self.enable_external_logging()
collect_crashinfo = True
temp_control_file_dir = None
try:
try:
if install_before and machines:
self._execute_code(INSTALL_CONTROL_FILE, namespace)
if only_collect_crashinfo:
return
# determine the dir to write the control files to
cfd_specified = (control_file_dir
and control_file_dir is not self._USE_TEMP_DIR)
if cfd_specified:
temp_control_file_dir = None
else:
temp_control_file_dir = tempfile.mkdtemp(
suffix='temp_control_file_dir')
control_file_dir = temp_control_file_dir
server_control_file = os.path.join(control_file_dir,
self._control_filename)
client_control_file = os.path.join(control_file_dir,
CLIENT_CONTROL_FILENAME)
if self._client:
namespace['control'] = control
utils.open_write_close(client_control_file, control)
shutil.copyfile(CLIENT_WRAPPER_CONTROL_FILE,
server_control_file)
else:
utils.open_write_close(server_control_file, control)
logging.info("Processing control file")
self._execute_code(server_control_file, namespace)
logging.info("Finished processing control file")
                # no error occurred, so we don't need to collect crashinfo
collect_crashinfo = False
except Exception, e:
try:
logging.exception(
'Exception escaped control file, job aborting:')
self.record('INFO', None, None, str(e),
{'job_abort_reason': str(e)})
except:
pass # don't let logging exceptions here interfere
raise
finally:
if temp_control_file_dir:
# Clean up temp directory used for copies of the control files
try:
shutil.rmtree(temp_control_file_dir)
except Exception, e:
logging.warn('Could not remove temp directory %s: %s',
temp_control_file_dir, e)
if machines and (collect_crashdumps or collect_crashinfo):
namespace['test_start_time'] = test_start_time
if collect_crashinfo:
# includes crashdumps
self._execute_code(CRASHINFO_CONTROL_FILE, namespace)
else:
self._execute_code(CRASHDUMPS_CONTROL_FILE, namespace)
if self._uncollected_log_file and created_uncollected_logs:
os.remove(self._uncollected_log_file)
self.disable_external_logging()
if cleanup and machines:
self._execute_code(CLEANUP_CONTROL_FILE, namespace)
if install_after and machines:
self._execute_code(INSTALL_CONTROL_FILE, namespace)
def run_test(self, url, *args, **dargs):
"""
Summon a test object and run it.
        @param url: url of the test to run
        @param tag: optional tag to add to the test name (read from dargs)
"""
group, testname = self.pkgmgr.get_package_name(url, 'test')
testname, subdir, tag = self._build_tagged_test_name(testname, dargs)
outputdir = self._make_test_outputdir(subdir)
def group_func():
try:
test.runtest(self, url, tag, args, dargs)
except error.TestBaseException, e:
self.record(e.exit_status, subdir, testname, str(e))
raise
except Exception, e:
info = str(e) + "\n" + traceback.format_exc()
self.record('FAIL', subdir, testname, info)
raise
else:
self.record('GOOD', subdir, testname, 'completed successfully')
result, exc_info = self._run_group(testname, subdir, group_func)
if exc_info and isinstance(exc_info[1], error.TestBaseException):
return False
elif exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
else:
return True
def _run_group(self, name, subdir, function, *args, **dargs):
"""\
Underlying method for running something inside of a group.
"""
result, exc_info = None, None
try:
self.record('START', subdir, name)
result = function(*args, **dargs)
except error.TestBaseException, e:
self.record("END %s" % e.exit_status, subdir, name)
exc_info = sys.exc_info()
except Exception, e:
err_msg = str(e) + '\n'
err_msg += traceback.format_exc()
self.record('END ABORT', subdir, name, err_msg)
raise error.JobError(name + ' failed\n' + traceback.format_exc())
else:
self.record('END GOOD', subdir, name)
return result, exc_info
def run_group(self, function, *args, **dargs):
"""\
function:
subroutine to run
*args:
arguments for the function
"""
name = function.__name__
# Allow the tag for the group to be specified.
tag = dargs.pop('tag', None)
if tag:
name = tag
return self._run_group(name, None, function, *args, **dargs)[0]
def run_reboot(self, reboot_func, get_kernel_func):
"""\
A specialization of run_group meant specifically for handling
a reboot. Includes support for capturing the kernel version
after the reboot.
reboot_func: a function that carries out the reboot
get_kernel_func: a function that returns a string
representing the kernel version.
"""
try:
self.record('START', None, 'reboot')
reboot_func()
except Exception, e:
err_msg = str(e) + '\n' + traceback.format_exc()
self.record('END FAIL', None, 'reboot', err_msg)
raise
else:
kernel = get_kernel_func()
self.record('END GOOD', None, 'reboot',
optional_fields={"kernel": kernel})
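    # Illustrative sketch (not from the original source): both callables are
    # typically thin wrappers around an assumed, already-created host object:
    #   job.run_reboot(lambda: host.reboot(),
    #                  lambda: host.run('uname -r').stdout.strip())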
def run_control(self, path):
"""Execute a control file found at path (relative to the autotest
path). Intended for executing a control file within a control file,
not for running the top-level job control file."""
path = os.path.join(self.autodir, path)
control_file = self._load_control_file(path)
self.run(control=control_file, control_file_dir=self._USE_TEMP_DIR)
def add_sysinfo_command(self, command, logfile=None, on_every_test=False):
self._add_sysinfo_loggable(sysinfo.command(command, logf=logfile),
on_every_test)
def add_sysinfo_logfile(self, file, on_every_test=False):
self._add_sysinfo_loggable(sysinfo.logfile(file), on_every_test)
def _add_sysinfo_loggable(self, loggable, on_every_test):
if on_every_test:
self.sysinfo.test_loggables.add(loggable)
else:
self.sysinfo.boot_loggables.add(loggable)
def _read_warnings(self):
"""Poll all the warning loggers and extract any new warnings that have
been logged. If the warnings belong to a category that is currently
disabled, this method will discard them and they will no longer be
retrievable.
Returns a list of (timestamp, message) tuples, where timestamp is an
integer epoch timestamp."""
warnings = []
while True:
# pull in a line of output from every logger that has
# output ready to be read
loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
closed_loggers = set()
for logger in loggers:
line = logger.readline()
# record any broken pipes (aka line == empty)
if len(line) == 0:
closed_loggers.add(logger)
continue
# parse out the warning
timestamp, msgtype, msg = line.split('\t', 2)
timestamp = int(timestamp)
# if the warning is valid, add it to the results
if self.warning_manager.is_valid(timestamp, msgtype):
warnings.append((timestamp, msg.strip()))
# stop listening to loggers that are closed
self.warning_loggers -= closed_loggers
# stop if none of the loggers have any output left
if not loggers:
break
# sort into timestamp order
warnings.sort()
return warnings
def _unique_subdirectory(self, base_subdirectory_name):
"""Compute a unique results subdirectory based on the given name.
Appends base_subdirectory_name with a number as necessary to find a
directory name that doesn't already exist.
"""
subdirectory = base_subdirectory_name
counter = 1
while os.path.exists(os.path.join(self.resultdir, subdirectory)):
subdirectory = base_subdirectory_name + '.' + str(counter)
counter += 1
return subdirectory
def get_record_context(self):
"""Returns an object representing the current job.record context.
The object returned is an opaque object with a 0-arg restore method
which can be called to restore the job.record context (i.e. indentation)
to the current level. The intention is that it should be used when
        something external which generates job.record calls (e.g. an autotest
client) can fail catastrophically and the server job record state
needs to be reset to its original "known good" state.
@return: A context object with a 0-arg restore() method."""
return self._indenter.get_context()
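    # Illustrative sketch (not from the original source): capture the context
    # before handing control to something that may fail, then restore the
    # job.record indentation afterwards:
    #   context = job.get_record_context()
    #   try:
    #       run_client_side_work()            # hypothetical external call
    #   finally:
    #       context.restore()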
def record_summary(self, status_code, test_name, reason='', attributes=None,
distinguishing_attributes=(), child_test_ids=None):
"""Record a summary test result.
@param status_code: status code string, see
common_lib.log.is_valid_status()
@param test_name: name of the test
@param reason: (optional) string providing detailed reason for test
outcome
@param attributes: (optional) dict of string keyvals to associate with
this result
@param distinguishing_attributes: (optional) list of attribute names
that should be used to distinguish identically-named test
results. These attributes should be present in the attributes
parameter. This is used to generate user-friendly subdirectory
names.
@param child_test_ids: (optional) list of test indices for test results
used in generating this result.
"""
subdirectory_name_parts = [test_name]
for attribute in distinguishing_attributes:
assert attributes
assert attribute in attributes, '%s not in %s' % (attribute,
attributes)
subdirectory_name_parts.append(attributes[attribute])
base_subdirectory_name = '.'.join(subdirectory_name_parts)
subdirectory = self._unique_subdirectory(base_subdirectory_name)
subdirectory_path = os.path.join(self.resultdir, subdirectory)
os.mkdir(subdirectory_path)
self.record(status_code, subdirectory, test_name,
status=reason, optional_fields={'is_summary': True})
if attributes:
utils.write_keyval(subdirectory_path, attributes)
if child_test_ids:
ids_string = ','.join(str(test_id) for test_id in child_test_ids)
summary_data = {'child_test_ids': ids_string}
utils.write_keyval(os.path.join(subdirectory_path, 'summary_data'),
summary_data)
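    # Illustrative sketch (not from the original source): two identically named
    # results can be told apart with a hypothetical 'platform' attribute:
    #   job.record_summary('GOOD', 'netperf', reason='',
    #                      attributes={'platform': 'x86_64'},
    #                      distinguishing_attributes=('platform',))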
def disable_warnings(self, warning_type):
self.warning_manager.disable_warnings(warning_type)
self.record("INFO", None, None,
"disabling %s warnings" % warning_type,
{"warnings.disable": warning_type})
def enable_warnings(self, warning_type):
self.warning_manager.enable_warnings(warning_type)
self.record("INFO", None, None,
"enabling %s warnings" % warning_type,
{"warnings.enable": warning_type})
def get_status_log_path(self, subdir=None):
"""Return the path to the job status log.
        @param subdir - Optional parameter indicating that you want the path
to a subdirectory status log.
@returns The path where the status log should be.
"""
if self.resultdir:
if subdir:
return os.path.join(self.resultdir, subdir, "status.log")
else:
return os.path.join(self.resultdir, "status.log")
else:
return None
def _update_uncollected_logs_list(self, update_func):
"""Updates the uncollected logs list in a multi-process safe manner.
@param update_func - a function that updates the list of uncollected
logs. Should take one parameter, the list to be updated.
"""
if self._uncollected_log_file:
log_file = open(self._uncollected_log_file, "r+")
fcntl.flock(log_file, fcntl.LOCK_EX)
try:
uncollected_logs = pickle.load(log_file)
update_func(uncollected_logs)
log_file.seek(0)
log_file.truncate()
pickle.dump(uncollected_logs, log_file)
log_file.flush()
finally:
fcntl.flock(log_file, fcntl.LOCK_UN)
log_file.close()
def add_client_log(self, hostname, remote_path, local_path):
"""Adds a new set of client logs to the list of uncollected logs,
to allow for future log recovery.
        @param hostname - the hostname of the machine holding the logs
@param remote_path - the directory on the remote machine holding logs
@param local_path - the local directory to copy the logs into
"""
def update_func(logs_list):
logs_list.append((hostname, remote_path, local_path))
self._update_uncollected_logs_list(update_func)
def remove_client_log(self, hostname, remote_path, local_path):
"""Removes a set of client logs from the list of uncollected logs,
to allow for future log recovery.
        @param hostname - the hostname of the machine holding the logs
@param remote_path - the directory on the remote machine holding logs
@param local_path - the local directory to copy the logs into
"""
def update_func(logs_list):
logs_list.remove((hostname, remote_path, local_path))
self._update_uncollected_logs_list(update_func)
def get_client_logs(self):
"""Retrieves the list of uncollected logs, if it exists.
@returns A list of (host, remote_path, local_path) tuples. Returns
an empty list if no uncollected logs file exists.
"""
log_exists = (self._uncollected_log_file and
os.path.exists(self._uncollected_log_file))
if log_exists:
return pickle.load(open(self._uncollected_log_file))
else:
return []
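    # Illustrative sketch (not from the original source): a client log set is
    # registered before collection and removed once it has been fetched
    # (hostname and paths below are hypothetical):
    #   job.add_client_log('client1', '/path/on/client/results', local_dir)
    #   ...                                   # fetch the logs
    #   job.remove_client_log('client1', '/path/on/client/results', local_dir)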
def _fill_server_control_namespace(self, namespace, protect=True):
"""
Prepare a namespace to be used when executing server control files.
This sets up the control file API by importing modules and making them
available under the appropriate names within namespace.
For use by _execute_code().
Args:
namespace: The namespace dictionary to fill in.
protect: Boolean. If True (the default) any operation that would
clobber an existing entry in namespace will cause an error.
Raises:
error.AutoservError: When a name would be clobbered by import.
"""
def _import_names(module_name, names=()):
"""
Import a module and assign named attributes into namespace.
Args:
module_name: The string module name.
names: A limiting list of names to import from module_name. If
empty (the default), all names are imported from the module
similar to a "from foo.bar import *" statement.
Raises:
error.AutoservError: When a name being imported would clobber
a name already in namespace.
"""
module = __import__(module_name, {}, {}, names)
# No names supplied? Import * from the lowest level module.
# (Ugh, why do I have to implement this part myself?)
if not names:
for submodule_name in module_name.split('.')[1:]:
module = getattr(module, submodule_name)
if hasattr(module, '__all__'):
names = getattr(module, '__all__')
else:
names = dir(module)
# Install each name into namespace, checking to make sure it
# doesn't override anything that already exists.
for name in names:
# Check for conflicts to help prevent future problems.
if name in namespace and protect:
if namespace[name] is not getattr(module, name):
raise error.AutoservError('importing name '
'%s from %s %r would override %r' %
(name, module_name, getattr(module, name),
namespace[name]))
else:
# Encourage cleanliness and the use of __all__ for a
                    # more concrete API with fewer surprises on '*' imports.
warnings.warn('%s (%r) being imported from %s for use '
'in server control files is not the '
                                  'first occurrence of that import.' %
(name, namespace[name], module_name))
namespace[name] = getattr(module, name)
# This is the equivalent of prepending a bunch of import statements to
# the front of the control script.
namespace.update(os=os, sys=sys, logging=logging)
_import_names('autotest_lib.server',
('hosts', 'autotest', 'kvm', 'git', 'standalone_profiler',
'source_kernel', 'rpm_kernel', 'deb_kernel', 'git_kernel'))
_import_names('autotest_lib.server.subcommand',
('parallel', 'parallel_simple', 'subcommand'))
_import_names('autotest_lib.server.utils',
('run', 'get_tmp_dir', 'sh_escape', 'parse_machine'))
_import_names('autotest_lib.client.common_lib.error')
_import_names('autotest_lib.client.common_lib.barrier', ('barrier',))
# Inject ourself as the job object into other classes within the API.
        # (Yuck, this injection is a gross thing to be part of a public API. -gps)
#
# XXX Base & SiteAutotest do not appear to use .job. Who does?
namespace['autotest'].Autotest.job = self
# server.hosts.base_classes.Host uses .job.
namespace['hosts'].Host.job = self
namespace['hosts'].factory.ssh_user = self._ssh_user
namespace['hosts'].factory.ssh_port = self._ssh_port
namespace['hosts'].factory.ssh_pass = self._ssh_pass
def _execute_code(self, code_file, namespace, protect=True):
"""
Execute code using a copy of namespace as a server control script.
        Unless protect is explicitly set to False, the dict will not
be modified.
Args:
code_file: The filename of the control file to execute.
namespace: A dict containing names to make available during execution.
protect: Boolean. If True (the default) a copy of the namespace dict
is used during execution to prevent the code from modifying its
contents outside of this function. If False the raw dict is
passed in and modifications will be allowed.
"""
if protect:
namespace = namespace.copy()
self._fill_server_control_namespace(namespace, protect=protect)
# TODO: Simplify and get rid of the special cases for only 1 machine.
if len(self.machines) > 1:
machines_text = '\n'.join(self.machines) + '\n'
# Only rewrite the file if it does not match our machine list.
try:
machines_f = open(MACHINES_FILENAME, 'r')
existing_machines_text = machines_f.read()
machines_f.close()
except EnvironmentError:
existing_machines_text = None
if machines_text != existing_machines_text:
utils.open_write_close(MACHINES_FILENAME, machines_text)
execfile(code_file, namespace, namespace)
def _parse_status(self, new_line):
if not self._using_parser:
return
new_tests = self.parser.process_lines([new_line])
for test in new_tests:
self.__insert_test(test)
def __insert_test(self, test):
"""
An internal method to insert a new test result into the
database. This method will not raise an exception, even if an
error occurs during the insert, to avoid failing a test
simply because of unexpected database issues."""
self.num_tests_run += 1
if status_lib.is_worse_than_or_equal_to(test.status, 'FAIL'):
self.num_tests_failed += 1
try:
self.results_db.insert_test(self.job_model, test)
except Exception:
msg = ("WARNING: An unexpected error occured while "
"inserting test results into the database. "
"Ignoring error.\n" + traceback.format_exc())
print >> sys.stderr, msg
def preprocess_client_state(self):
"""
Produce a state file for initializing the state of a client job.
Creates a new client state file with all the current server state, as
well as some pre-set client state.
@returns The path of the file the state was written into.
"""
# initialize the sysinfo state
self._state.set('client', 'sysinfo', self.sysinfo.serialize())
# dump the state out to a tempfile
fd, file_path = tempfile.mkstemp(dir=self.tmpdir)
os.close(fd)
# write_to_file doesn't need locking, we exclusively own file_path
self._state.write_to_file(file_path)
return file_path
def postprocess_client_state(self, state_path):
"""
Update the state of this job with the state from a client job.
Updates the state of the server side of a job with the final state
of a client job that was run. Updates the non-client-specific state,
pulls in some specific bits from the client-specific state, and then
discards the rest. Removes the state file afterwards
        @param state_path A path to the state file from the client.
"""
# update the on-disk state
try:
self._state.read_from_file(state_path)
os.remove(state_path)
except OSError, e:
# ignore file-not-found errors
if e.errno != errno.ENOENT:
raise
else:
logging.debug('Client state file %s not found', state_path)
# update the sysinfo state
if self._state.has('client', 'sysinfo'):
self.sysinfo.deserialize(self._state.get('client', 'sysinfo'))
# drop all the client-specific state
self._state.discard_namespace('client')
def clear_all_known_hosts(self):
"""Clears known hosts files for all AbstractSSHHosts."""
for host in self.hosts:
if isinstance(host, abstract_ssh.AbstractSSHHost):
host.clear_known_hosts()
class warning_manager(object):
"""Class for controlling warning logs. Manages the enabling and disabling
of warnings."""
def __init__(self):
# a map of warning types to a list of disabled time intervals
self.disabled_warnings = {}
def is_valid(self, timestamp, warning_type):
"""Indicates if a warning (based on the time it occured and its type)
is a valid warning. A warning is considered "invalid" if this type of
warning was marked as "disabled" at the time the warning occured."""
disabled_intervals = self.disabled_warnings.get(warning_type, [])
for start, end in disabled_intervals:
if timestamp >= start and (end is None or timestamp < end):
return False
return True
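    # Illustrative example (not from the original source): with
    #   disabled_warnings = {'NETWORK': [(1000, 2000), (3000, None)]}
    # a 'NETWORK' warning stamped 1500 or 3500 is rejected, while one stamped
    # 2500 is still considered valid.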
def disable_warnings(self, warning_type, current_time_func=time.time):
"""As of now, disables all further warnings of this type."""
intervals = self.disabled_warnings.setdefault(warning_type, [])
if not intervals or intervals[-1][1] is not None:
intervals.append((int(current_time_func()), None))
def enable_warnings(self, warning_type, current_time_func=time.time):
"""As of now, enables all further warnings of this type."""
intervals = self.disabled_warnings.get(warning_type, [])
if intervals and intervals[-1][1] is None:
intervals[-1] = (intervals[-1][0], int(current_time_func()))
# load up site-specific code for generating site-specific job data
get_site_job_data = utils.import_site_function(__file__,
"autotest_lib.server.site_server_job", "get_site_job_data",
_get_site_job_data_dummy)
site_server_job = utils.import_site_class(
__file__, "autotest_lib.server.site_server_job", "site_server_job",
base_server_job)
class server_job(site_server_job):
pass
| gpl-2.0 | -7,604,342,849,247,894,000 | 38.549565 | 80 | 0.587551 | false |
odoo-brazil/PySPED | pysped/cte/leiaute/consrecicte_300.py | 1 | 8951 | # -*- coding: utf-8 -*-
from pysped.xml_sped import *
from pysped.cte.leiaute import ESQUEMA_ATUAL_VERSAO_300 as ESQUEMA_ATUAL
import os
from .cte_300 import CTe
DIRNAME = os.path.dirname(__file__)
class ConsReciCTe(XMLNFe):
def __init__(self):
super(ConsReciCTe, self).__init__()
self.versao = TagDecimal(nome='consReciCTe', codigo='BP02', propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='BP03', tamanho=[1, 1, 1] , raiz='//consReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.nRec = TagCaracter(nome='nRec' , codigo='BP04', tamanho=[1, 15, 1] , raiz='//consReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'consReciCTe_v3.00.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.tpAmb.xml
xml += self.nRec.xml
xml += '</consReciCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.tpAmb.xml = arquivo
self.nRec.xml = arquivo
return self.xml
xml = property(get_xml, set_xml)
class InfProt(XMLNFe):
def __init__(self):
super(InfProt, self).__init__()
self.Id = TagCaracter(nome='infProt' , codigo='PR04', propriedade='Id' , raiz='/' , obrigatorio=False, namespace=NAMESPACE_CTE)
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='PR05', tamanho=[1, 1, 1], raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.verAplic = TagCaracter(nome='verAplic', codigo='PR06', tamanho=[1, 20] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.chCTe = TagCaracter(nome='chCTe' , codigo='PR07', tamanho=[44, 44] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.dhRecbto = TagDataHora(nome='dhRecbto', codigo='PR08' , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.nProt = TagCaracter(nome='nProt' , codigo='PR09', tamanho=[15, 15] , raiz='//infProt', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.digVal = TagCaracter(nome='digVal' , codigo='PR10', tamanho=[28, 28] , raiz='//infProt', obrigatorio=False, namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cStat = TagCaracter(nome='cStat' , codigo='PR11' , tamanho=[1, 3] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.xMotivo = TagCaracter(nome='xMotivo' , codigo='PR12' , tamanho=[1, 255] , raiz='//infProt', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
if self.Id.valor:
xml += self.Id.xml
else:
xml += '<infProt>'
xml += self.tpAmb.xml
xml += self.verAplic.xml
xml += self.chCTe.xml
xml += self.dhRecbto.xml
xml += self.nProt.xml
xml += self.digVal.xml
xml += self.cStat.xml
xml += self.xMotivo.xml
xml += '</infProt>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Id.xml = arquivo
self.tpAmb.xml = arquivo
self.verAplic.xml = arquivo
self.chCTe.xml = arquivo
self.dhRecbto.xml = arquivo
self.nProt.xml = arquivo
self.digVal.xml = arquivo
self.cStat.xml = arquivo
self.xMotivo.xml = arquivo
xml = property(get_xml, set_xml)
class ProtCTe(XMLNFe):
def __init__(self):
super(ProtCTe, self).__init__()
self.versao = TagDecimal(nome='protCTe', codigo='PR02' , propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.infProt = InfProt()
self.Signature = Signature()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += self.versao.xml
xml += self.infProt.xml
if len(self.Signature.URI) and (self.Signature.URI.strip() != '#'):
xml += self.Signature.xml
xml += '</protCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
            #
            # The infProt group is also used by the webservice that queries the
            # status of a CT-e, so its root cannot be assumed to always be the
            # protCTe group.
            #
self.infProt.xml = self._le_noh('//protCTe/infProt', ns=NAMESPACE_CTE)
self.Signature.xml = self._le_noh('//protCTe/sig:Signature', ns=NAMESPACE_CTE)
xml = property(get_xml, set_xml)
def protocolo_formatado(self):
if not self.infProt.nProt.valor:
return ''
formatado = self.infProt.nProt.valor
formatado += ' - '
formatado += self.infProt.dhRecbto.formato_danfe()
return formatado
class RetConsReciCTe(XMLNFe):
def __init__(self):
super(RetConsReciCTe, self).__init__()
self.versao = TagDecimal(nome='retConsReciCTe', codigo='BR02' , propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.tpAmb = TagInteiro(nome='tpAmb' , codigo='BR03' , tamanho=[1, 1, 1], raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.verAplic = TagCaracter(nome='verAplic' , codigo='BR04' , tamanho=[1, 20] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.nRec = TagCaracter(nome='nRec' , codigo='BR04a', tamanho=[1, 15, 1] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cStat = TagCaracter(nome='cStat' , codigo='BR05' , tamanho=[1, 3] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.xMotivo = TagCaracter(nome='xMotivo' , codigo='BR06' , tamanho=[1, 255] , raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.cUF = TagCaracter(nome='cUF' , codigo='BR06a', tamanho=[2, 2, 2], raiz='//retConsReciCTe', namespace=NAMESPACE_CTE, namespace_obrigatorio=False)
self.protCTe = []
        #
        # Dictionary of protocols, keyed by the CT-e access key
        #
self.dic_protCTe = {}
        #
        # Dictionary of processes (CT-e + protocol), keyed by the CT-e access key
        #
self.dic_procCTe = {}
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'retConsReciCTe_v3.00.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += self.versao.xml
xml += self.tpAmb.xml
xml += self.verAplic.xml
xml += self.nRec.xml
xml += self.cStat.xml
xml += self.xMotivo.xml
xml += self.cUF.xml
for pn in self.protCTe:
xml += pn.xml
xml += '</retConsReciCTe>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.tpAmb.xml = arquivo
self.verAplic.xml = arquivo
self.nRec.xml = arquivo
self.cStat.xml = arquivo
self.xMotivo.xml = arquivo
self.cUF.xml = arquivo
self.protCTe = self.le_grupo('//retConsReciCTe/protCTe', ProtCTe, sigla_ns='cte')
            #
            # Build the protocol dictionary
            #
for pn in self.protCTe:
self.dic_protCTe[pn.infProt.chCTe.valor] = pn
xml = property(get_xml, set_xml)
class ProcCTe(XMLNFe):
def __init__(self):
super(ProcCTe, self).__init__()
self.versao = TagDecimal(nome='cteProc', propriedade='versao', namespace=NAMESPACE_CTE, valor='3.00', raiz='/')
self.CTe = CTe()
self.protCTe = ProtCTe()
self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'procCTe_v3.00.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.CTe.xml.replace(ABERTURA, '')
xml += self.protCTe.xml.replace(ABERTURA, '')
xml += '</cteProc>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.CTe.xml = arquivo
self.protCTe.xml = self._le_noh('//cteProc/protCTe', ns=NAMESPACE_CTE)
xml = property(get_xml, set_xml)
| lgpl-2.1 | 8,531,728,794,076,872,000 | 40.985915 | 180 | 0.587499 | false |
machinalis/django-srd20 | browse/tests.py | 1 | 1842 | from django.test import TestCase
from django.contrib.auth.models import User, Permission
class BrowseTest(TestCase):
fixtures = ['srd.json']
def test_get(self):
response = self.client.get('/browse/spell/alarm/')
# Check that we got a result
self.assertEqual(200, response.status_code)
        # Check that the requested spell was in the context
self.assertEqual('alarm', response.context['spell'].altname)
def test_get_complex_slug(self):
"""This test intended to check that the slug regex is OK and works with dashes and uppercase"""
response = self.client.get('/browse/spell/summon-natures-ally-VI/')
self.assertEqual(200, response.status_code)
self.assertEqual('summon-natures-ally-VI', response.context['spell'].altname)
def test_get_404(self):
response = self.client.get('/browse/spell/does-not-exist/')
# Check that we got a 404 result
self.assertEqual(404, response.status_code)
def test_anonymous_cant_edit(self):
response = self.client.get('/browse/spell/alarm/')
self.assertEqual(200, response.status_code)
self.assertEqual(False, response.context['editable'])
    def test_authenticated_can_edit(self):
authenticated, _ = User.objects.get_or_create(
username='testuser',
password='*',
is_staff=True
)
authenticated.set_password('test')
authenticated.user_permissions.add(Permission.objects.get(codename='change_spell', content_type__app_label='srd20'))
authenticated.save()
self.client.login(username='testuser',password='test')
response = self.client.get('/browse/spell/alarm/')
self.assertEqual(200, response.status_code)
self.assertEqual(True, response.context['editable'])
| bsd-3-clause | 7,727,649,877,768,813,000 | 41.837209 | 124 | 0.661781 | false |
jbarascut/blog | pelicanconf.py | 1 | 1230 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Gnupyx'
SITENAME = u'Gnupyx'
SITEURL = 'http://gnupyx.ninja'
PATH = 'content'
TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = u'fr'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = True
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Custom
USE_CUSTOM_MENU = True
CUSTOM_MENUITEMS = (('Blog', ''),
#('CV', 'pages/cv.html'),
('Contact', 'pages/contact.html'))
CONTACT_EMAIL = "[email protected]"
CONTACTS = (('linkedin', 'https://www.linkedin.com/pub/j%C3%A9r%C3%A9my-barascut/22/107/446'),
('github', 'https://github.com/jbarascut'),)
| gpl-3.0 | 4,497,086,495,078,515,000 | 26.333333 | 94 | 0.630081 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/network_watcher.py | 1 | 2066 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class NetworkWatcher(Resource):
"""Network watcher in a resource group.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:ivar provisioning_state: The provisioning state of the resource. Possible
values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:vartype provisioning_state: str or
~azure.mgmt.network.v2016_09_01.models.ProvisioningState
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
super(NetworkWatcher, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.provisioning_state = None
| mit | 8,103,589,322,604,929,000 | 34.016949 | 85 | 0.575508 | false |
RichardLMR/xls2txtISA.NANO.archive | xls2txtISA.NANO.archive.py | 1 | 9430 | '''
xls2txtISA.NANO.archive.py
***********************
The research leading to the development of this program has received funding from the European Union Seventh Framework Programme (FP7/2007-2013) under grant agreement number 309837 (NanoPUZZLES project).
http://wwww.nanopuzzles.eu
************************
######################
#License information##
######################
Copyright (c) 2015 Liverpool John Moores University
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
THIS PROGRAM IS MADE AVAILABLE FOR DISTRIBUTION WITHOUT ANY FORM OF WARRANTY TO THE
EXTENT PERMITTED BY APPLICABLE LAW. THE COPYRIGHT HOLDER PROVIDES THE PROGRAM \"AS IS\"
WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM LIES
WITH THE USER. SHOULD THE PROGRAM PROVE DEFECTIVE IN ANY WAY, THE USER ASSUMES THE
COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. THE COPYRIGHT HOLDER IS NOT
RESPONSIBLE FOR ANY AMENDMENT, MODIFICATION OR OTHER ENHANCEMENT MADE TO THE PROGRAM
BY ANY USER WHO REDISTRIBUTES THE PROGRAM SO AMENDED, MODIFIED OR ENHANCED.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL THE
COPYRIGHT HOLDER BE LIABLE TO ANY USER FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
OR LOSSES SUSTAINED BY THE USER OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO
OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
####################
See also: http://www.gnu.org/licenses/ (last accessed 14/01/2013)
Contact:
1. [email protected]
or if this fails
2. [email protected]
#####################
########
Purpose#
########
To convert a compressed, *flat* archive ("yyyy.zip") populated with ISA-TAB-Nano based ".xls" files, to a corresponding compressed, *flat* archive ("yyyy-txt.zip") of ISA-TAB-Nano based tab delimited text (".txt") files.
N.B. ISA-TAB-Nano is described here: https://wiki.nci.nih.gov/display/ICR/ISA-TAB-Nano
DISCLAIMER: No endorsements from the original ISA-TAB-Nano developers or any other third party organisations should be inferred.
########
Usage #
########
python xls2txtISA.NANO.archive.py -i <absolute name of zip file containing ISA-TAB-Nano files in ".xls" format>
e.g.
python xls2txtISA.NANO.archive.py -i "C:\Work\Investigation.ID.zip"
Options:
-a : modify "Term Accession Numbers" = TRUE (default:FALSE). N.B. If this is set to TRUE, http://purl.bioontology.org/ontology/npo#NPO_1915 would be converted to NPO_1915 etc. This may be required by some ISA-TAB-Nano software programs.
-c : remove all "Comment" rows from the Investigation file. Some ISA-TAB-Nano software programs may not accept these rows.
-N: edit certain fields (or field entries) to be consistent with the latest version of the NanoPUZZLES ISA-TAB-Nano Excel templates
'''
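# Illustrative invocation (not part of the original documentation) combining the
# optional flags described above; the archive path is taken from the example in
# the docstring:
#   python xls2txtISA.NANO.archive.py -i "C:\Work\Investigation.ID.zip" -a -c -N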
###########################
#######Imports#############
import sys,re,glob,getopt,shutil,os
dir_of_this_file = re.sub('(xls2txtISA\.NANO\.archive\.py)','',os.path.abspath(__file__))
sys.path.append(r'%sutils' % dir_of_this_file)
from zipper import zipUtils
from os_ind_delimiter import delimiter
from xls2txt import changeXls2txt
from fixTxtContents import fixContents
###########################
##########################
########Globals###########
#*************************************
#Fixed
#*************************************
fileNameRegexesDict = {}
fileNameRegexesDict['input_extension'] = re.compile('(\.xls$)')
fileNameRegexesDict['Investigation'] = re.compile('(i_)')
fileNameRegexesDict['Study'] = re.compile('(s_)')
fileNameRegexesDict['Material'] = re.compile('(m_)')
fileNameRegexesDict['Assay'] = re.compile('(a_)')
all_file_types = [key for key in fileNameRegexesDict.keys() if not 'input_extension' == key]
del key
##########################
def extractXlsFolder(xls_archive):
instOfzipUtils = zipUtils(delimiter())
sub_folder_count = instOfzipUtils.archive2folder(xls_archive)
assert 0 == sub_folder_count
return instOfzipUtils.folder_name
def idInputFiles(xls_folder):
instOfzipUtils = zipUtils(delimiter())
input_files = instOfzipUtils.getRelativeFileNames(xls_folder)
input_files = [r'%s%s%s' % (xls_folder,delimiter(),file) for file in input_files if fileNameRegexesDict['input_extension'].search(file)]
del file
del instOfzipUtils
#non_xls_files = [file_name for file_name in input_files if not fileNameRegexesDict['input_extension'].search(file_name)]
#del file_name
#assert 0 == len(non_xls_files),"There are %d non-xls files in the folder %s created from the input archive." % (len(non_xls_files),xls_folder)
#del non_xls_files
fileType2No = {}
for fileType in all_file_types:
fileType2No[fileType] = len([file_name for file_name in input_files if fileNameRegexesDict[fileType].match(file_name.split(delimiter())[-1])])
assert not 0 == fileType2No[fileType], "Zero %s input files in the folder created from the input archive!" % fileType
print "%d %s input files in the folder created from the input archive!" % (fileType2No[fileType],fileType)
del fileType2No
return input_files#,non_xls_files #non_xls_files should just be copied across to the final zip archive without modification -see "def createFlatTxtArchive(xls_folder):"
def createAllTxt(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges):
abs_name_input_files = idInputFiles(xls_folder)
for xls_file in abs_name_input_files:
txt_file = changeXls2txt(xls_file,fileNameRegexesDict['input_extension'])
applicable_standard_file_types = [ft for ft in all_file_types if fileNameRegexesDict[ft].match(txt_file.split(delimiter())[-1])]
del ft
assert 1 >= len(applicable_standard_file_types),"txt_file=%s,applicable_standard_file_types=%s" % (txt_file,str(applicable_standard_file_types))
if 1 == len(applicable_standard_file_types):
current_file_type = applicable_standard_file_types[0]
else:
assert 0 == len(applicable_standard_file_types),"txt_file=%s,applicable_standard_file_types=%s" % (txt_file,str(applicable_standard_file_types))
current_file_type = 'NonStandard'
del applicable_standard_file_types
fixContents(input_file=txt_file,out_name=None,del_intermediates=True,file_type=current_file_type,shouldEditAccessionCodes=mustEditAccessionCodes,shouldRemoveComments=mustRemoveComments,shouldMakeNanoPUZZLESspecificChanges=mustMakeNanoPUZZLESspecificChanges)
def createFlatTxtArchive(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges):
flat_txt_archive = xls_folder+"-txt.zip"
###########
#Rename the output file if non-default options are used
if mustEditAccessionCodes:
flat_txt_archive = re.sub('(\.zip$)','_opt-a.zip',flat_txt_archive)
if mustRemoveComments:
flat_txt_archive = re.sub('(\.zip$)','_opt-c.zip',flat_txt_archive)
if mustMakeNanoPUZZLESspecificChanges:
flat_txt_archive = re.sub('(\.zip$)','_opt-N.zip',flat_txt_archive)
###########
cwd = os.getcwd()
os.chdir(xls_folder)
for xls_file in glob.glob('*.xls'):
os.remove(xls_file)
os.chdir(cwd)
del cwd,xls_file
instOfzipUtils = zipUtils(delimiter_value=delimiter())
instOfzipUtils.filesIntoFlatArchive(folder_name=xls_folder,zip_archive=flat_txt_archive)
del instOfzipUtils
def cleanUp(folder_list):
for folder in folder_list:
cwd = os.getcwd()
os.chdir(folder)
for file in glob.glob('*'):
os.remove(file)
os.chdir(cwd)
os.rmdir(folder)
def main():
#######################
#**********************
#These Boolean variables can be changed from their default values using command line switches
#**********************
mustEditAccessionCodes = False
mustRemoveComments = False
mustMakeNanoPUZZLESspecificChanges = False
#######################
print '-'*50
try:
#############
opts,args = getopt.getopt(sys.argv[1:],'Ncai:',['mustMakeNanoPUZZLESspecificChanges=True','mustRemoveComments=True','mustEditAccessionCodes=True','input='])
for o,v in opts:
if '-i' == o:
xls_archive = r'%s' % re.sub('"','',v)
if '-a' == o:
mustEditAccessionCodes = True
if '-c' == o:
mustRemoveComments = True
if '-N' == o:
mustMakeNanoPUZZLESspecificChanges = True
del o,v,opts,args
#############
except Exception:
print __doc__
sys.exit(1)
print 'Converting:', xls_archive
xls_folder = extractXlsFolder(xls_archive)
createAllTxt(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges)
createFlatTxtArchive(xls_folder,mustEditAccessionCodes,mustRemoveComments,mustMakeNanoPUZZLESspecificChanges)
cleanUp([xls_folder])
print xls_archive, " CONVERTED SUCCESSFULLY"
print '-'*50
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-2.0 | -1,746,001,864,893,949,200 | 37.178138 | 259 | 0.714952 | false |
rmelo19/rmelo19-arduino | fritzing/fritzing.0.9.2b.64.pc/parts/part-gen-scripts/misc_scripts/findfonts.py | 1 | 2245 | # usage:
# findfonts.py -d <directory> -f [font1] -f [font2] ....
#
# <directory> is a folder, with subfolders, containing .svg files. In each svg file in the directory or its children
# look for fonts that aren't in the list
import getopt, sys, os, re
def usage():
print """
usage:
	findfonts.py -d [directory] -f [font1] -f [font2] ...
directory is a folder containing .svg files.
In each svg file in the directory or its subfolders,
look for fonts that aren't in the list
"""
def main():
try:
		opts, args = getopt.getopt(sys.argv[1:], "hd:f:", ["help", "directory=", "font="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
outputDir = None
fonts = []
for o, a in opts:
#print o
#print a
if o in ("-d", "--directory"):
outputDir = a
elif o in ("-h", "--help"):
usage()
sys.exit(2)
elif o in ("-f", "--font"):
fonts.append(a);
else:
assert False, "unhandled option"
if(not(outputDir)):
usage()
sys.exit(2)
for root, dirs, files in os.walk(outputDir, topdown=False):
for filename in files:
if (filename.endswith(".svg")):
infile = open(os.path.join(root, filename), "r")
svg = infile.read();
infile.close();
matches = re.findall('font-family\\s*=\\s*\"(.+)\"', svg)
listMatches(matches, fonts, root, filename);
matches = re.findall('font-family\\s*:\\s*(.+)[\\";]', svg)
listMatches(matches, fonts, root, filename);
def listMatches(matches, fonts, root, filename):
for m in matches:
gotone = 0
for fontname in fonts:
if (m.find(fontname) >= 0):
gotone = 1;
break;
if not gotone:
print "{0}::{1}".format(os.path.join(root, filename), m)
if __name__ == "__main__":
main()
| gpl-3.0 | -7,906,983,742,202,827,000 | 27.782051 | 120 | 0.498441 | false |
ff0000/red-fab-deploy2 | fab_deploy2/base/collectd.py | 1 | 10192 | import os
import re
from fab_deploy2 import functions
from fab_deploy2.config import CustomConfig
from fab_deploy2.tasks import ServiceContextTask, task_method
from fabric.api import run, sudo, env, put, local
from fabric.contrib.files import append, exists
from jinja2.exceptions import TemplateNotFound
class Collectd(ServiceContextTask):
"""
Sync a collectd config
"""
name = 'setup'
context_name = 'collectd'
namespace = 'collectd'
default_context = {
'template' : 'collectd/collectd.conf',
'base_dir' : '/var/lib/collectd',
'remote_config_path' : '/etc/collectd/collectd.conf',
'plugin_configs' : '/etc/collectd/collectd.d/',
'plugin_path' : '/usr/lib/collectd/',
'types_db' : '/usr/share/collectd/types.db',
'config_section' : 'collectd-receiver',
'interval' : 20,
'base_plugins' : ('cpu', 'load', 'interface', 'logfile', 'df',
'disk','processes','swap', 'tcpconns', 'memory',
'network', 'threshold'),
'package_name' : 'collectd',
'package_names' : None,
'timeout' : 2,
'threads' : 5,
'collectd_tar' : 'collectd-5.4.0'
}
def _add_package(self, name):
raise NotImplementedError()
def _plugin_context(self, plugin):
data = {}
context_func = '{0}_context'.format(plugin)
if hasattr(self, context_func):
data.update(getattr(self, context_func)())
data.update(self._get_plugin_env_context(plugin))
return data
def _get_plugin_env_context(self, plugin):
key = 'collectd_{0}'.format(plugin)
data = env.context.get(key, {})
role = env.host_roles.get(env.host_string)
if role:
role_dict = functions.get_role_context(role)
data.update(role_dict.get(key, {}))
return data
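    # Illustrative sketch (not from the original source): per-plugin settings
    # can be supplied through the fabric env context under a 'collectd_<plugin>'
    # key, e.g. (hypothetical values):
    #   env.context['collectd_nginx'] = {'url': 'http://127.0.0.1/nginx_status'}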
def _install_plugin(self, plugin, context, **kwargs):
task_name = 'collectd.{0}_setup'.format(plugin)
task = functions.get_task_instance(task_name)
package_names = self.package_names
if task:
functions.execute_on_host(task_name, **kwargs)
else:
if package_names and plugin in package_names:
self._add_package(package_names[plugin])
self.render_plugin_configs(plugin, context, **kwargs)
def render_plugin_configs(self, plugin, context, **overides):
if not env.task_context.get(env.host_string + 'pdir'):
sudo('mkdir -p {0}'.format(self.plugin_configs))
env.task_context[env.host_string + 'pdir'] = True
plugin_context = self._plugin_context(plugin)
plugin_context.update(overides)
local_context = dict(context)
local_context[plugin] = plugin_context
plugin_path = None
threshold_path = None
try:
template = plugin_context.get("template",
"collectd/{0}.conf".format(plugin))
plugin_path = functions.render_template(template,
context=local_context)
except TemplateNotFound:
pass
try:
template = plugin_context.get("threshold_template",
"collectd/threshold-{0}.conf").format(plugin)
threshold_path = functions.render_template(template,
context=local_context)
except TemplateNotFound:
pass
if plugin_path:
sudo('ln -sf {0} {1}'.format(plugin_path, self.plugin_configs))
if threshold_path:
path = os.path.join(self.plugin_configs, 'thresholds.conf')
sudo("echo '\n<Plugin \"threshold\">' > {0} && find {1}collectd/ -name threshold-* -exec cat {{}} \; >> {0} && echo '\n</Plugin>\n' >> {0}".format(path, env.configs_path))
@task_method
def setup(self, *args, **kwargs):
self._add_package(self.package_name)
context = self.get_template_context()
template = context[self.context_name].get('template', self.template)
for plugin in self.base_plugins:
self._install_plugin(plugin, context)
context['connection'] = re.sub(r'[^a-zA-Z0-9-]', '-', env.host_string)
context['role'] = env.host_roles.get(env.host_string)
dest_path = functions.render_template(template, self.template,
context=context)
sudo("ln -sf {0} {1}".format(dest_path, self.remote_config_path))
@task_method
def install_plugin(self, plugin, **kwargs):
"""
Installs a plugin only if there is a collectd section in your config
"""
if not env.config_object.has_section(self.config_section):
print "Cowardly refusing to install plugin because there is no {0} section in your servers.ini".format(self.config_section)
elif not exists(self.plugin_path):
print "Cowardly refusing to install plugin because {0} does not exist".format(self.plugin_path)
else:
self._install_plugin(plugin, self.get_template_context(), **kwargs)
@task_method
def install_rrd_receiver(self):
context = self.get_template_context()
self._install_plugin('rrdtool', context)
context['network'] = self._plugin_context('network')
auth_file = functions.render_template("collectd/auth.conf", context=context)
self._install_plugin('network', context, auth_file=auth_file)
def _get_collectd_headers(self):
raise NotImplementedError()
def _add_to_types(self, new_types):
filename = self.types_db
for t in new_types:
append(filename, t, use_sudo=True)
def _gcc_share_args(self):
return ""
@task_method
def redis_setup(self):
"""
The redis that comes with collectd uses credis.
That doesn't work with newer redis versions.
        So we install hiredis and compile this version from source
and copy the plugin in.
"""
path = self._get_collectd_headers()
context = self._plugin_context('redis')
functions.execute_on_host('hiredis.setup')
run('wget --no-check-certificate {0}'.format(context['plugin_url']))
run("gcc -DHAVE_CONFIG_H -I{0} -I{0}core -Wall -Werror -g -O2 -fPIC -DPIC -o redis.o -c redis.c".format(path))
run('gcc -shared redis.o {0} -Wl,-lhiredis -Wl,-soname -Wl,redis.so -o redis.so'.format(self._gcc_share_args()))
sudo('cp redis.so {0}'.format(self.plugin_path))
self._add_to_types([
'blocked_clients value:GAUGE:0:U',
'changes_since_last_save value:GAUGE:0:U',
'pubsub value:GAUGE:0:U',
'expired_keys value:GAUGE:0:U',
])
run('rm redis.*')
self.render_plugin_configs('redis', self.get_template_context())
@task_method
def haproxy_setup(self):
"""
Compile the haproxy plugin and copy it in.
"""
context = self._plugin_context('haproxy')
path = self._get_collectd_headers()
run('wget --no-check-certificate {0}'.format(context['plugin_url']))
run('gcc -DHAVE_CONFIG_H -I{0} -I{0}core -Wall -Werror -g -O2 -c haproxy.c -fPIC -DPIC -o haproxy.o'.format(path))
run('gcc -shared haproxy.o {0} -Wl,-soname -Wl,haproxy.so -o haproxy.so'.format(self._gcc_share_args()))
sudo('cp haproxy.so {0}'.format(self.plugin_path))
self._add_to_types([
'hap_sessions total:ABSOLUTE:0:U, rate:ABSOLUTE:0:U',
'hap_bytes rx:COUNTER:0:1125899906842623, tx:COUNTER:0:1125899906842623',
'hap_errors req:COUNTER:0:1125899906842623, rsp:COUNTER:0:1125899906842623, con:COUNTER:0:1125899906842623',
'hap_deny req:COUNTER:0:1125899906842623, rsp:COUNTER:0:1125899906842623',
'hap_status value:GAUGE:0:100',
'hap_http_codes 1xx_codes:DERIVE:0:1125899906842623, 2xx_codes:DERIVE:0:1125899906842623, 3xx_codes:DERIVE:0:1125899906842623, 4xx_codes:DERIVE:0:1125899906842623, 5xx_codes:DERIVE:0:1125899906842623 other:DERIVE:0:1125899906842623',
])
run('rm haproxy.*')
self.render_plugin_configs('haproxy', self.get_template_context())
def postgresql_context(self):
data = {
'username' : env.config_object.get('db-server', 'replicator'),
'password' : env.config_object.get('db-server', 'replicator-password')
}
data.update(self._get_plugin_env_context('postgresql'))
if not 'name' in data:
if env.config_object.has_option('db-server', 'db-name'):
database_name = env.config_object.get('db-server', 'db-name')
else:
database_name = None
while not database_name:
database_name = raw_input('Enter your database name: ')
                env.config_object.set('db-server', 'db-name', database_name)
data['name'] = database_name
return data
def network_context(self):
if env.config_object.has_section(self.config_section):
return {
'target_host' : env.config_object.get_list(self.config_section,env.config_object.CONNECTIONS),
'username' : env.config_object.get(self.config_section,"username"),
'password' : env.config_object.get(self.config_section,"password")
}
else:
return {}
def rrdtool_context(self):
return {
'location' : os.path.join(self.base_dir, 'rrd'),
'wps' : '100',
'flush' : '120'
}
def haproxy_context(self):
return {
'plugin_url' : 'https://raw.githubusercontent.com/Fotolia/collectd-mod-haproxy/master/haproxy.c',
}
def redis_context(self):
return {
'plugin_url' : 'https://raw.githubusercontent.com/ajdiaz/collectd/hiredis/src/redis.c',
}
def nginx_context(self):
return {
'url' : 'http://localhost/nginx_status'
}
| mit | -3,502,012,099,853,734,400 | 39.444444 | 245 | 0.587323 | false |
peterheim1/robbie_ros | robbie_moveit/nodes/pick_test1.py | 1 | 4531 | #!/usr/bin/env python
import sys
import rospy
from moveit_commander import RobotCommander, MoveGroupCommander
from moveit_commander import PlanningSceneInterface, roscpp_initialize, roscpp_shutdown
from geometry_msgs.msg import PoseStamped
from moveit_msgs.msg import Grasp, GripperTranslation, PlaceLocation
from trajectory_msgs.msg import JointTrajectoryPoint
if __name__=='__main__':
roscpp_initialize(sys.argv)
rospy.init_node('moveit_py_demo', anonymous=True)
GRIPPER_FRAME = 'right_gripper_link'
scene = PlanningSceneInterface()
robot = RobotCommander()
right_arm = MoveGroupCommander("right_arm")
right_gripper = MoveGroupCommander("right_gripper")
#right_arm.set_planner_id("KPIECEkConfigDefault");
rospy.sleep(1)
# clean the scene
scene.remove_world_object("table")
scene.remove_world_object("part")
scene.remove_attached_object(GRIPPER_FRAME, "part")
#rospy.logwarn("cleaning world")
#right_arm.set_named_target("r_start")
#right_arm.go()
#right_gripper.set_named_target("open")
#right_gripper.go()
rospy.sleep(3)
# publish a demo scene
p = PoseStamped()
p.header.frame_id = robot.get_planning_frame()
# add a table
p.pose.position.x = 1.0
p.pose.position.y = 0.2
p.pose.position.z = 0.3
scene.add_box("table", p, (0.7, 1, 0.7))
# add an object to be grasped
p.pose.position.x = 0.4
p.pose.position.y = 0
p.pose.position.z = 0.75
scene.add_box("part", p, (0.07, 0.01, 0.2))
# add a position for placement
p1 = PoseStamped()
p1.header.frame_id = robot.get_planning_frame()
p1.pose.position.x = 0.4
p1.pose.position.y = -0.3
p1.pose.position.z = 0.75
rospy.sleep(1)
#rospy.logwarn("moving to test")
grasps = []
    # reference grasp position: 0.67611; 0.0091003; 0.71731
g = Grasp()
g.id = "test"
grasp_pose = PoseStamped()
grasp_pose.header.frame_id = "base_footprint"
grasp_pose.pose.position.x = 0.35
grasp_pose.pose.position.y = -0
grasp_pose.pose.position.z = 0.76
grasp_pose.pose.orientation.x = -0.0209083116076
grasp_pose.pose.orientation.y = -0.00636455547831
grasp_pose.pose.orientation.z = 0.0170413352124
grasp_pose.pose.orientation.w = 0.999615890147
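    # Move the arm to the grasp pose before building the grasp message.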
rospy.logwarn("moving to arm")
right_arm.set_pose_target(grasp_pose)
right_arm.go()
rospy.sleep(1)
# set the grasp pose
g.grasp_pose = grasp_pose
# define the pre-grasp approach
g.pre_grasp_approach.direction.header.frame_id = "base_footprint"
g.pre_grasp_approach.direction.vector.x = 0.4
g.pre_grasp_approach.direction.vector.y = -0.0
g.pre_grasp_approach.direction.vector.z = 1.0
g.pre_grasp_approach.min_distance = 0.001
g.pre_grasp_approach.desired_distance = 0.1
g.pre_grasp_posture.header.frame_id = "right_gripper_link"
g.pre_grasp_posture.joint_names = ["right_arm_gripper_joint"]
pos = JointTrajectoryPoint()
pos.positions.append(0.0)
g.pre_grasp_posture.points.append(pos)
# set the grasp posture
g.grasp_posture.header.frame_id = "right_gripper_link"
g.grasp_posture.joint_names = ["right_arm_gripper_joint"]
pos = JointTrajectoryPoint()
pos.positions.append(0.2)
pos.effort.append(0.0)
g.grasp_posture.points.append(pos)
# set the post-grasp retreat
g.post_grasp_retreat.direction.header.frame_id = "base_footprint"
g.post_grasp_retreat.direction.vector.x = 1
g.post_grasp_retreat.direction.vector.y = -1
g.post_grasp_retreat.direction.vector.z = 1
g.post_grasp_retreat.desired_distance = 0.35
g.post_grasp_retreat.min_distance = 0.01
g.allowed_touch_objects = ["table"]
g.max_contact_force = 0
# append the grasp to the list of grasps
grasps.append(g)
# pick the object
#robot.right_arm.pick("part", grasps)
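    # Retry the pick until it succeeds.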
result = False
n_attempts = 0
    # repeat until the pick succeeds
while result == False:
result = robot.right_arm.pick("part", grasps)
n_attempts += 1
print "Attempts pickup: ", n_attempts
rospy.sleep(0.2)
rospy.sleep(6)
result1 = False
n_attempts1 = 0
while result1 == False:
result1 = robot.right_arm.place("part",p1)
n_attempts1 += 1
print "Attempts place: ", n_attempts1
rospy.sleep(0.2)
#robot.right_arm.place("part",p)
#right_arm.go()
rospy.sleep(0.1)
rospy.spin()
roscpp_shutdown()
| bsd-3-clause | -1,146,657,331,692,744,300 | 27.859873 | 87 | 0.651291 | false |
MAECProject/maec-to-stix | maec_to_stix/indicator_extractor/config_parser.py | 1 | 9270 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import os
import collections
import json
class ConfigParser(object):
"""Used to parse the JSON indicator extraction configuration files.
Attributes:
config_dict: the parsed dictionary representation of the main configuration file.
supported_actions: the list of supported Actions (names).
supported_objects: a dictionary of supported Objects and their properties.
Args:
config_directory: the path to the directory where the configuration files can be found.
"""
def __init__(self, config_directory=None):
# The path to the directory where the configuration files can be found
self.config_directory = config_directory
# The parsed configuration dictionary
self.config_dict = {}
# List of supported Actions
self.supported_actions = []
# Dictionary of supported Objects and their properties
self.supported_objects = {}
self.parse_config()
def print_config(self):
"""Print the current set of configuration parameters to stdout.
Note:
This method prints detailed information about the parsed Indicator
extraction configuration, including:
1. The general Indicator extraction parameters (from config/extractor_config.json)
2. The supported Actions (derived from all of the parsed JSON configuration files)
3. The supported Objects and their properties (derived from all of the parsed JSON configuration files)
4. The contra-indicators and modifiers to use in candidate Object filtering
"""
# Print the general parameters
print "\n[Configuration Parameters]"
for key, value in self.config_dict.iteritems():
if isinstance(value, bool):
print " {0} : {1}".format(key,value)
elif isinstance(value, dict):
print " {0}".format(key)
for embedded_key, embedded_value in value.iteritems():
print " {0} : {1}".format(embedded_key,embedded_value)
elif isinstance(value, list):
print " {0}".format(key)
for embedded_value in value:
print " {0}".format(embedded_value)
# Print the supported Actions
print "\n[Supported Actions]"
for action_name in sorted(self.supported_actions):
print " {0}".format(action_name)
# Print the supported Objects
print "\n[Supported Objects]"
for object_type in sorted(self.supported_objects):
supported_fields = self.supported_objects[object_type]
print " {0}".format(object_type)
required = supported_fields["required"]
mutually_exclusive_required = supported_fields["mutually_exclusive"]
optional = supported_fields["optional"]
if required:
print " Required Fields"
for field in sorted(required):
print " {0}".format(field)
if mutually_exclusive_required:
print " Mutually Exclusive (Required) Fields"
for field in sorted(mutually_exclusive_required):
print " {0}".format(field)
if optional:
print " Optional Fields"
for field in sorted(optional):
print " {0}".format(field)
def _parse_object_config_dict(self, object_type, config_dict):
"""Parse an Object configuration dictionary."""
flattened_dict = ConfigParser.flatten_dict(config_dict)
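        # Each flattened key is a '/'-separated path to an Object property,
        # mapped to its configuration options.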
for key, config_options in flattened_dict.iteritems():
if config_options["enabled"]:
whitelist = config_options.get('whitelist')
if object_type not in self.supported_objects:
self.supported_objects[object_type] = {"required":{}, "optional":{},
"mutually_exclusive":{}}
object_conf = self.supported_objects[object_type]
if config_options["required"]:
if "mutually_exclusive" in config_options and config_options["mutually_exclusive"]:
object_conf["mutually_exclusive"][key] = whitelist
else:
object_conf["required"][key] = whitelist
else:
object_conf["optional"][key] = whitelist
def _parse_granular_config(self, granular_config_file):
"""Parse a granular JSON configuration structure."""
try:
# Load the default installed configuration file if no directory is specified
if not self.config_directory:
config_filename = os.path.join(os.path.dirname(__file__) , "config", granular_config_file)
# Otherwise, load the specified configuration file
else:
config_filename = os.path.join(self.config_directory, granular_config_file)
with open(config_filename, mode='r') as f:
config = json.loads(f.read())
except EnvironmentError:
print "Error reading configuration file: " + granular_config_file
raise
for config_type, config_values in config.iteritems():
if config_type == "supported objects":
for object_type, properties_dict in config_values.iteritems():
self._parse_object_config_dict(object_type, properties_dict)
elif config_type == "supported actions":
for enum_name, actions_dict in config_values.iteritems():
for action_name, enabled in actions_dict.iteritems():
if enabled:
self.supported_actions.append(action_name)
def parse_config(self):
"""Parse the JSON configuration structure and build the appropriate data structures."""
# Parse and load the configuration file
try:
# Load the default installed configuration file if no directory is specified
if not self.config_directory:
config_filename = os.path.join(os.path.dirname(__file__), "config", "extractor_config.json")
# Otherwise, load the specified configuration file
else:
config_filename = os.path.join(self.config_directory, "extractor_config.json")
with open(config_filename, mode='r') as f:
self.config_dict = json.loads(f.read())
except EnvironmentError:
print "Error reading extractor configuration file."
raise
# Use the granular options structure if specified
if self.config_dict["use_granular_options"]:
self._parse_granular_config("granular_config.json")
else:
abstracted_options = self.config_dict["abstracted_options"]
for option, enabled in abstracted_options.iteritems():
if option == "file_system_activity" and enabled:
self._parse_granular_config("file_system_activity_config.json")
elif option == "registry_activity" and enabled:
self._parse_granular_config("registry_activity_config.json")
elif option == "mutex_activity" and enabled:
self._parse_granular_config("mutex_activity_config.json")
elif option == "process_activity" and enabled:
self._parse_granular_config("process_activity_config.json")
elif option == "service_activity" and enabled:
self._parse_granular_config("service_activity_config.json")
elif option == "network_activity" and enabled:
self._parse_granular_config("network_activity_config.json")
elif option == "driver_activity" and enabled:
self._parse_granular_config("driver_activity_config.json")
@staticmethod
def flatten_dict(d, parent_key='', sep='/'):
"""Flatten a nested dictionary into one with a single set of key/value pairs.
Args:
d: an input dictionary to flatten.
parent_key: the parent_key, for use in building the root key name
when handling nested dictionaries.
sep: the separator to use between the concatenated keys in the root key.
Return:
The flattened representation of the input dictionary.
"""
items = {}
for k, v in d.iteritems():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, dict):
if "enabled" not in v and "required" not in v:
items.update(ConfigParser.flatten_dict(v, new_key, sep=sep))
else:
items[new_key] = v
elif isinstance(v, list):
for list_item in v:
items.update(ConfigParser.flatten_dict(list_item, new_key, sep=sep))
else:
items[new_key] = v
return items | bsd-3-clause | -5,422,211,667,800,922,000 | 49.38587 | 119 | 0.588457 | false |
plaes/numpy | numpy/core/code_generators/genapi.py | 1 | 15403 | """
Get API information encoded in C files.
See ``find_functions`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
import sys, os, re
try:
import hashlib
md5new = hashlib.md5
except ImportError:
import md5
md5new = md5.new
if sys.version_info[:2] < (2, 6):
from sets import Set as set
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'methods.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'number.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'usertypes.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'buffer.c'),
join('multiarray', 'datetime.c'),
join('umath', 'ufunc_object.c'),
join('umath', 'loops.c.src'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
return ''.join(s.split())
def _repl(str):
return str.replace('intp', 'npy_intp').replace('Bool','npy_bool')
class Function(object):
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
self.args = args
self.doc = doc
def _format_arg(self, (typename, name)):
if typename.endswith('*'):
return typename + name
else:
return typename + ' ' + name
def __str__(self):
argstr = ', '.join([self._format_arg(a) for a in self.args])
if self.doc:
doccomment = '/* %s */\n' % self.doc
else:
doccomment = ''
return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)
def to_ReST(self):
lines = ['::', '', ' ' + self.return_type]
argstr = ',\000'.join([self._format_arg(a) for a in self.args])
name = ' %s' % (self.name,)
s = textwrap.wrap('(%s)' % (argstr,), width=72,
initial_indent=name,
subsequent_indent=' ' * (len(name)+1),
break_long_words=False)
for l in s:
lines.append(l.replace('\000', ' ').rstrip())
lines.append('')
if self.doc:
lines.append(textwrap.dedent(self.doc))
return '\n'.join(lines)
def api_hash(self):
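        """Return a short, stable hash of the function signature (return
        type, name and argument types).
        """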
m = md5new()
m.update(remove_whitespace(self.return_type))
m.update('\000')
m.update(self.name)
m.update('\000')
for typename, name in self.args:
m.update(remove_whitespace(typename))
m.update('\000')
return m.hexdigest()[:8]
class ParseError(Exception):
def __init__(self, filename, lineno, msg):
self.filename = filename
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s:%s:%s' % (self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
count = 0
for i, c in enumerate(s):
if c == lbrac:
count += 1
elif c == rbrac:
count -= 1
if count == 0:
return i
raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
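    """Split a C argument string into (type, name) pairs, keeping commas
    that appear inside nested parentheses within a single argument.
    """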
arguments = []
bracket_counts = {'(': 0, '[': 0}
current_argument = []
state = 0
i = 0
def finish_arg():
if current_argument:
argstr = ''.join(current_argument).strip()
m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
if m:
typename = m.group(1).strip()
name = m.group(3)
else:
typename = argstr
name = ''
arguments.append((typename, name))
del current_argument[:]
while i < len(argstr):
c = argstr[i]
if c == ',':
finish_arg()
elif c == '(':
p = skip_brackets(argstr[i:], '(', ')')
current_argument += argstr[i:i+p]
i += p-1
else:
current_argument += c
i += 1
finish_arg()
return arguments
def find_functions(filename, tag='API'):
"""
Scan the file, looking for tagged functions.
Assuming ``tag=='API'``, a tagged function looks like::
/*API*/
static returntype*
function_name(argtype1 arg1, argtype2 arg2)
{
}
where the return type must be on a separate line, the function
name must start the line, and the opening ``{`` must start the line.
An optional documentation comment in ReST format may follow the tag,
as in::
/*API
This function does foo...
*/
"""
fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
function_args = []
doclist = []
SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
state = SCANNING
tagcomment = '/*' + tag
for lineno, line in enumerate(fo):
try:
line = line.strip()
if state == SCANNING:
if line.startswith(tagcomment):
if line.endswith('*/'):
state = STATE_RETTYPE
else:
state = STATE_DOC
elif state == STATE_DOC:
if line.startswith('*/'):
state = STATE_RETTYPE
else:
line = line.lstrip(' *')
doclist.append(line)
elif state == STATE_RETTYPE:
# first line of declaration with return type
m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
if m:
line = m.group(1)
return_type = line
state = STATE_NAME
elif state == STATE_NAME:
# second line, with function name
m = re.match(r'(\w+)\s*\(', line)
if m:
function_name = m.group(1)
else:
raise ParseError(filename, lineno+1,
'could not find function name')
function_args.append(line[m.end():])
state = STATE_ARGS
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
fargs_str = ' '.join(function_args).rstrip(' )')
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
functions.append(f)
return_type = None
function_name = None
function_args = []
doclist = []
state = SCANNING
else:
function_args.append(line)
except:
print filename, lineno+1
raise
fo.close()
return functions
def should_rebuild(targets, source_files):
from distutils.dep_util import newer_group
for t in targets:
if not os.path.exists(t):
return True
sources = API_FILES + list(source_files) + [__file__]
if newer_group(sources, targets[0], missing='newer'):
return True
return False
# These *Api class instances know how to output strings for the generated code
class TypeApi:
def __init__(self, name, index, ptr_cast, api_name):
self.index = index
self.name = name
self.ptr_cast = ptr_cast
self.api_name = api_name
def define_from_array_api_string(self):
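        # Emits a define of the form:
        # #define PyArray_Type (*(PyTypeObject *)PyArray_API[2])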
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.ptr_cast,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject %(type)s;
#else
NPY_NO_EXPORT PyTypeObject %(type)s;
#endif
""" % {'type': self.name}
return astr
class GlobalVarApi:
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
self.type = type
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (%s *) &%s" % (self.type, self.name)
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT %(type)s %(name)s;
#else
NPY_NO_EXPORT %(type)s %(name)s;
#endif
""" % {'type': self.type, 'name': self.name}
return astr
# A dummy class so that *Api instances can be used consistently for all items
# in the array api
class BoolValuesApi:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
self.type = 'PyBoolScalarObject'
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s ((%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
"""
return astr
class FunctionApi:
def __init__(self, name, index, return_type, args, api_name):
self.name = name
self.index = index
self.return_type = return_type
self.args = args
self.api_name = api_name
def _argtypes_string(self):
if not self.args:
return 'void'
argstr = ', '.join([_repl(a[0]) for a in self.args])
return argstr
def define_from_array_api_string(self):
define = """\
#define %s \\\n (*(%s (*)(%s)) \\
%s[%d])""" % (self.name,
self.return_type,
self._argtypes_string(),
self.api_name,
self.index)
return define
def array_api_define(self):
return " (void *) %s" % self.name
def internal_define(self):
astr = """\
NPY_NO_EXPORT %s %s \\\n (%s);""" % (self.return_type,
self.name,
self._argtypes_string())
return astr
def order_dict(d):
"""Order dict by its values."""
o = d.items()
def cmp(x, y):
return x[1] - y[1]
return sorted(o, cmp=cmp)
def merge_api_dicts(dicts):
ret = {}
for d in dicts:
for k, v in d.items():
ret[k] = v
return ret
def check_api_dict(d):
"""Check that an api dict is valid (does not use the same index twice)."""
    # We check whether the same index is used twice: we 'invert' the dict so
    # that indexes become keys. If the lengths differ, it means one index has
    # been used at least twice.
revert_dict = dict([(v, k) for k, v in d.items()])
if not len(revert_dict) == len(d):
# We compute a dict index -> list of associated items
doubled = {}
for name, index in d.items():
try:
doubled[index].append(name)
except KeyError:
doubled[index] = [name]
msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \
if len(names) != 1]
raise ValueError(msg)
    # No 'holes' in the indexes are allowed, and they must start at 0
indexes = set(d.values())
expected = set(range(len(indexes)))
if not indexes == expected:
diff = expected.symmetric_difference(indexes)
msg = "There are some holes in the API indexing: " \
"(symmetric diff is %s)" % diff
raise ValueError(msg)
def get_api_functions(tagname, api_dict):
"""Parse source files to get functions tagged by the given tag."""
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
dfunctions = []
for func in functions:
o = api_dict[func.name]
dfunctions.append( (o, func) )
dfunctions.sort()
return [a[1] for a in dfunctions]
def fullapi_hash(api_dicts):
"""Given a list of api dicts defining the numpy C API, compute a checksum
of the list of items in the API (as a string)."""
a = []
for d in api_dicts:
def sorted_by_values(d):
"""Sort a dictionary by its values. Assume the dictionary items is of
the form func_name -> order"""
return sorted(d.items(), key=lambda (x, y): (y, x))
for name, index in sorted_by_values(d):
a.extend(name)
a.extend(str(index))
return md5new(''.join(a)).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum is a 128-bit md5 checksum (also in hex format)
VERRE = re.compile('(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
def get_versions_hash():
d = []
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
fid = open(file, 'r')
try:
for line in fid.readlines():
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
finally:
fid.close()
return dict(d)
def main():
tagname = sys.argv[1]
order_file = sys.argv[2]
functions = get_api_functions(tagname, order_file)
m = md5new(tagname)
for func in functions:
print func
ah = func.api_hash()
m.update(ah)
print hex(int(ah,16))
print hex(int(m.hexdigest()[:8],16))
if __name__ == '__main__':
main()
| bsd-3-clause | 8,382,274,081,688,541,000 | 31.427368 | 81 | 0.510355 | false |
souravbadami/oppia | core/domain/stats_services_test.py | 1 | 110817 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.stats_services."""
import operator
import os
from core import jobs_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import question_services
from core.domain import stats_domain
from core.domain import stats_jobs_continuous
from core.domain import stats_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
import utils
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
class StatisticsServicesTests(test_utils.GenericTestBase):
"""Test the helper functions and methods defined in the stats_services
module.
"""
def setUp(self):
super(StatisticsServicesTests, self).setUp()
self.exp_id = 'exp_id1'
self.exp_version = 1
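        # Seed default stats, issues and playthrough models for 'exp_id1' so
        # that the tests below can fetch and modify them.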
self.stats_model_id = (
stats_models.ExplorationStatsModel.create(
'exp_id1', 1, 0, 0, 0, 0, 0, 0, {}))
stats_models.ExplorationIssuesModel.create(
self.exp_id, self.exp_version, [])
self.playthrough_id = stats_models.PlaythroughModel.create(
'exp_id1', 1, 'EarlyQuit', {}, [])
def test_get_exploration_stats_with_new_exp_id(self):
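        """Test the get_exploration_stats method with a new exp id."""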
exploration_stats = stats_services.get_exploration_stats(
'new_exp_id', 1)
self.assertEqual(exploration_stats.exp_version, 1)
self.assertEqual(exploration_stats.exp_id, 'new_exp_id')
self.assertEqual(exploration_stats.state_stats_mapping, {})
def test_update_stats_method(self):
"""Test the update_stats method."""
exploration_stats = stats_services.get_exploration_stats_by_id(
'exp_id1', 1)
exploration_stats.state_stats_mapping = {
'Home': stats_domain.StateStats.create_default()
}
stats_services.save_stats_model_transactional(exploration_stats)
        # Pass an exploration start event into the stats model created in the
        # setUp function.
aggregated_stats = {
'num_starts': 1,
'num_actual_starts': 1,
'num_completions': 1,
'state_stats_mapping': {
'Home': {
'total_hit_count': 1,
'first_hit_count': 1,
'total_answers_count': 1,
'useful_feedback_count': 1,
'num_times_solution_viewed': 1,
'num_completions': 1
}
}
}
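        # These aggregated counts should be added to the v2 stats fields of
        # the stored exploration stats model.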
stats_services.update_stats(
'exp_id1', 1, aggregated_stats)
exploration_stats = stats_services.get_exploration_stats_by_id(
'exp_id1', 1)
self.assertEqual(exploration_stats.num_starts_v2, 1)
self.assertEqual(exploration_stats.num_actual_starts_v2, 1)
self.assertEqual(exploration_stats.num_completions_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Home'].total_hit_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Home'].first_hit_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Home'].total_answers_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Home'].useful_feedback_count_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Home'].num_completions_v2, 1)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Home'].num_times_solution_viewed_v2, 1)
def test_calls_to_stats_methods(self):
"""Test that calls are being made to the
handle_stats_creation_for_new_exp_version and
handle_stats_creation_for_new_exploration methods when an exploration is
created or updated.
"""
# Initialize call counters.
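        # CallCounter wraps each handler and records how many times it is
        # invoked via its times_called attribute.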
stats_for_new_exploration_log = test_utils.CallCounter(
stats_services.handle_stats_creation_for_new_exploration)
stats_for_new_exp_version_log = test_utils.CallCounter(
stats_services.handle_stats_creation_for_new_exp_version)
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
with self.swap(
stats_services, 'handle_stats_creation_for_new_exploration',
stats_for_new_exploration_log):
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
        # At this point, the stats creation method for new explorations has
        # been called once, while the stats creation method for new
        # exploration versions has not been called at all.
self.assertEqual(stats_for_new_exploration_log.times_called, 1)
self.assertEqual(stats_for_new_exp_version_log.times_called, 0)
# Update exploration by adding a state.
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state'
})]
with self.swap(
stats_services, 'handle_stats_creation_for_new_exp_version',
stats_for_new_exp_version_log):
exp_services.update_exploration(
feconf.SYSTEM_COMMITTER_ID, exp_id, change_list, '')
        # Now the stats creation method for new explorations has still been
        # called only once, and the stats creation method for new exploration
        # versions has also been called once.
self.assertEqual(stats_for_new_exploration_log.times_called, 1)
self.assertEqual(stats_for_new_exp_version_log.times_called, 1)
def test_exploration_changes_effect_on_exp_issues_model(self):
"""Test the effect of exploration changes on exploration issues
model.
"""
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
        # When an exploration is created, an exploration issues model must
        # also be created.
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_issues = stats_services.get_exp_issues(exp_id, exploration.version)
self.assertEqual(exp_issues.exp_version, exploration.version)
self.assertEqual(exp_issues.unresolved_issues, [])
        # When the exploration is updated to the next version, an exploration
        # issues model is also created for the new version.
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': 'New state'
})]
exp_services.update_exploration(
'committer_id_v3', exploration.id, change_list, 'Added new state')
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_issues = stats_services.get_exp_issues(exp_id, exploration.version)
self.assertEqual(exp_issues.exp_version, exploration.version)
self.assertEqual(exp_issues.unresolved_issues, [])
        # Create a playthrough and assign it to an issue in the exploration
        # issues model.
playthrough_id1 = stats_models.PlaythroughModel.create(
exploration.id, exploration.version, 'EarlyQuit', {
'state_name': {
'value': 'New state'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
}, [{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'New state'
}
},
'schema_version': 1
}])
exp_issue1 = stats_domain.ExplorationIssue.from_dict({
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'New state'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': [playthrough_id1],
'schema_version': 1,
'is_valid': True
})
exp_issues.unresolved_issues.append(exp_issue1)
stats_services.save_exp_issues_model_transactional(exp_issues)
# Delete a state.
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_DELETE_STATE,
'state_name': 'New state'
})]
exp_services.update_exploration(
'committer_id_v3', exploration.id, change_list, 'Deleted a state')
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_issues = stats_services.get_exp_issues(exp_id, exploration.version)
self.assertEqual(exp_issues.exp_version, exploration.version)
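        # The issue still references the deleted state, so it is now marked
        # as invalid.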
self.assertEqual(exp_issues.unresolved_issues[0].to_dict(), {
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'New state'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': [playthrough_id1],
'schema_version': 1,
'is_valid': False
})
        # Reverting to an older version also changes the exploration issues
        # model.
exp_services.revert_exploration(
'committer_id_v4', exp_id, current_version=3, revert_to_version=2)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_issues = stats_services.get_exp_issues(exp_id, exploration.version)
self.assertEqual(exp_issues.exp_version, exploration.version)
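        # After the revert the referenced state exists again, so the issue
        # becomes valid once more.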
self.assertEqual(exp_issues.unresolved_issues[0].to_dict(), {
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'New state'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': [playthrough_id1],
'schema_version': 1,
'is_valid': True
})
def test_handle_stats_creation_for_new_exploration(self):
"""Test the handle_stats_creation_for_new_exploration method."""
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
stats_services.handle_stats_creation_for_new_exploration(
exploration.id, exploration.version, exploration.states)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_id, exp_id)
self.assertEqual(exploration_stats.exp_version, 1)
self.assertEqual(exploration_stats.num_starts_v1, 0)
self.assertEqual(exploration_stats.num_starts_v2, 0)
self.assertEqual(exploration_stats.num_actual_starts_v1, 0)
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v1, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
self.assertEqual(
exploration_stats.state_stats_mapping.keys(), ['Home', 'End'])
def test_revert_exploration_creates_stats(self):
"""Test that the revert_exploration method creates stats
for the newest exploration version.
"""
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
# Save stats for version 1.
exploration_stats = stats_services.get_exploration_stats_by_id(
exp_id, 1)
exploration_stats.num_starts_v2 = 3
exploration_stats.num_actual_starts_v2 = 2
exploration_stats.num_completions_v2 = 1
stats_services.save_stats_model_transactional(exploration_stats)
        # Update the exploration to version 2 and save stats for that version.
exp_services.update_exploration(
'committer_id_v2', exploration.id, [], 'Updated')
exploration_stats = stats_services.get_exploration_stats_by_id(
exp_id, 2)
exploration_stats.num_starts_v2 = 4
exploration_stats.num_actual_starts_v2 = 3
exploration_stats.num_completions_v2 = 2
stats_services.save_stats_model_transactional(exploration_stats)
# Revert to an older version.
exp_services.revert_exploration(
'committer_id_v3', exp_id, current_version=2, revert_to_version=1)
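        # The revert creates version 3; its stats should match those saved
        # for version 1.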
exploration_stats = stats_services.get_exploration_stats_by_id(
exp_id, 3
)
self.assertIsNotNone(exploration_stats)
self.assertEqual(exploration_stats.num_starts_v2, 3)
self.assertEqual(exploration_stats.num_actual_starts_v2, 2)
self.assertEqual(exploration_stats.num_completions_v2, 1)
def test_handle_stats_creation_for_new_exp_version(self):
"""Test the handle_stats_creation_for_new_exp_version method."""
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
# Test addition of states.
exploration.add_states(['New state', 'New state 2'])
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state 2'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
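        # The versions diff records which states were added, renamed or
        # deleted, so that existing state stats can be carried over to the
        # new version.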
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=exp_versions_diff, revert_to_version=None)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_id, exp_id)
self.assertEqual(exploration_stats.exp_version, 2)
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()), set([
'Home', 'New state 2', 'End', 'New state']))
self.assertEqual(
exploration_stats.state_stats_mapping['New state'].to_dict(),
stats_domain.StateStats.create_default().to_dict())
self.assertEqual(
exploration_stats.state_stats_mapping['New state 2'].to_dict(),
stats_domain.StateStats.create_default().to_dict())
# Test renaming of states.
exploration.rename_state('New state 2', 'Renamed state')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'Renamed state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=exp_versions_diff, revert_to_version=None)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_version, 3)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()), set([
'Home', 'End', 'Renamed state', 'New state']))
# Test deletion of states.
exploration.delete_state('New state')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=exp_versions_diff, revert_to_version=None)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_version, 4)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()),
set(['Home', 'Renamed state', 'End']))
# Test addition, renaming and deletion of states.
exploration.add_states(['New state 2'])
exploration.rename_state('New state 2', 'Renamed state 2')
exploration.delete_state('Renamed state 2')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state 2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'Renamed state 2'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 2'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=exp_versions_diff, revert_to_version=None)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_version, 5)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()),
set(['Home', 'End', 'Renamed state']))
# Test addition and multiple renames.
exploration.add_states(['New state 2'])
exploration.rename_state('New state 2', 'New state 3')
exploration.rename_state('New state 3', 'New state 4')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state 2',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'New state 3'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 3',
'new_state_name': 'New state 4'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=exp_versions_diff, revert_to_version=None)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_version, 6)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()),
set(['Home', 'New state 4', 'Renamed state', 'End']))
        # Set some values for the stats in the ExplorationStatsModel
        # instance.
exploration_stats_model = stats_models.ExplorationStatsModel.get_model(
exploration.id, exploration.version)
exploration_stats_model.num_actual_starts_v2 = 5
exploration_stats_model.num_completions_v2 = 2
exploration_stats_model.state_stats_mapping['New state 4'][
'total_answers_count_v2'] = 12
exploration_stats_model.state_stats_mapping['Home'][
'total_hit_count_v2'] = 8
exploration_stats_model.state_stats_mapping['Renamed state'][
'first_hit_count_v2'] = 2
exploration_stats_model.state_stats_mapping['End'][
'useful_feedback_count_v2'] = 4
exploration_stats_model.put()
        # Test deletion, addition and renaming of states.
exploration.delete_state('New state 4')
exploration.add_states(['New state'])
exploration.rename_state('New state', 'New state 4')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state 4'
}), exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state 4'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=exp_versions_diff, revert_to_version=None)
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_version, 7)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()),
set(['Home', 'New state 4', 'Renamed state', 'End']))
# Test the values of the stats carried over from the last version.
self.assertEqual(exploration_stats.num_actual_starts_v2, 5)
self.assertEqual(exploration_stats.num_completions_v2, 2)
self.assertEqual(
exploration_stats.state_stats_mapping['Home'].total_hit_count_v2, 8)
self.assertEqual(
exploration_stats.state_stats_mapping[
'Renamed state'].first_hit_count_v2, 2)
self.assertEqual(
exploration_stats.state_stats_mapping[
'End'].useful_feedback_count_v2, 4)
# State 'New state 4' has been deleted and recreated, so it should
# now contain default values for stats instead of the values it
# contained in the last version.
self.assertEqual(
exploration_stats.state_stats_mapping[
'New state 4'].total_answers_count_v2, 0)
# Test reverts.
exploration.version += 1
stats_services.handle_stats_creation_for_new_exp_version(
exploration.id, exploration.version, exploration.states,
exp_versions_diff=None, revert_to_version=5)
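        # Version 5 still had default stats, so the reverted version starts
        # from zero counts again.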
exploration_stats = stats_services.get_exploration_stats_by_id(
exploration.id, exploration.version)
self.assertEqual(exploration_stats.exp_version, 8)
self.assertEqual(
set(exploration_stats.state_stats_mapping.keys()),
set(['Home', 'Renamed state', 'End']))
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
def test_create_exp_issues_for_new_exploration(self):
"""Test the create_exp_issues_for_new_exploration method."""
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
stats_services.create_exp_issues_for_new_exploration(
exploration.id, exploration.version)
exp_issues = stats_services.get_exp_issues(
exploration.id, exploration.version)
self.assertEqual(exp_issues.exp_id, exploration.id)
self.assertEqual(exp_issues.exp_version, exploration.version)
self.assertEqual(exp_issues.unresolved_issues, [])
def test_update_exp_issues_for_new_exp_version(self):
"""Test the update_exp_issues_for_new_exp_version method."""
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_issues = stats_services.get_exp_issues(
exploration.id, exploration.version)
self.assertEqual(exp_issues.exp_version, 1)
self.assertEqual(exp_issues.unresolved_issues, [])
# Create playthrough instances for this version.
playthrough_id1 = stats_models.PlaythroughModel.create(
exploration.id, exploration.version, 'EarlyQuit', {
'state_name': {
'value': 'Home'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
}, [{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'Home'
}
},
'schema_version': 1
}])
playthrough_id2 = stats_models.PlaythroughModel.create(
exploration.id, exploration.version, 'EarlyQuit', {
'state_name': {
'value': 'End'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
}, [{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'End'
}
},
'schema_version': 1
}])
exp_issue1 = stats_domain.ExplorationIssue.from_dict({
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'Home'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': [playthrough_id1],
'schema_version': 1,
'is_valid': True
})
exp_issue2 = stats_domain.ExplorationIssue.from_dict({
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'End'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': [playthrough_id2],
'schema_version': 1,
'is_valid': True
})
exp_issues.unresolved_issues = [exp_issue1, exp_issue2]
stats_services.save_exp_issues_model_transactional(exp_issues)
# Test renaming of states.
exploration.rename_state('Home', 'Renamed state')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.update_exp_issues_for_new_exp_version(
exploration, exp_versions_diff, False)
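        # Both the issue customization args and the playthrough actions
        # should now refer to the renamed state.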
exp_issues = stats_services.get_exp_issues(
exploration.id, exploration.version)
self.assertEqual(exp_issues.exp_version, 2)
self.assertEqual(
exp_issues.unresolved_issues[0].issue_customization_args[
'state_name']['value'], 'Renamed state')
self.assertEqual(
exp_issues.unresolved_issues[1].issue_customization_args[
'state_name']['value'], 'End')
playthrough1_instance = stats_models.PlaythroughModel.get(
playthrough_id1)
self.assertEqual(
playthrough1_instance.issue_customization_args['state_name'][
'value'], 'Renamed state')
self.assertEqual(
playthrough1_instance.actions[0]['action_customization_args'][
'state_name']['value'],
'Renamed state')
playthrough2_instance = stats_models.PlaythroughModel.get(
playthrough_id2)
self.assertEqual(
playthrough2_instance.issue_customization_args['state_name'][
'value'], 'End')
self.assertEqual(
playthrough2_instance.actions[0]['action_customization_args'][
'state_name']['value'],
'End')
# Test deletion of states.
exploration.delete_state('End')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'End'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.update_exp_issues_for_new_exp_version(
exploration, exp_versions_diff, False)
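        # The issue that references the deleted 'End' state is kept but
        # marked as invalid.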
exp_issues = stats_services.get_exp_issues(
exploration.id, exploration.version)
self.assertEqual(exp_issues.exp_version, 3)
self.assertEqual(
exp_issues.unresolved_issues[0].issue_customization_args[
'state_name']['value'], 'Renamed state')
self.assertEqual(
exp_issues.unresolved_issues[1].issue_customization_args[
'state_name']['value'], 'End')
self.assertEqual(
exp_issues.unresolved_issues[1].is_valid, False)
playthrough1_instance = stats_models.PlaythroughModel.get(
playthrough_id1)
self.assertEqual(
playthrough1_instance.issue_customization_args['state_name'][
'value'], 'Renamed state')
self.assertEqual(
playthrough1_instance.actions[0]['action_customization_args'][
'state_name']['value'],
'Renamed state')
playthrough2_instance = stats_models.PlaythroughModel.get(
playthrough_id2)
self.assertEqual(
playthrough2_instance.issue_customization_args['state_name'][
'value'], 'End')
self.assertEqual(
playthrough2_instance.actions[0]['action_customization_args'][
'state_name']['value'],
'End')
def test_get_playthroughs_multi(self):
"""Test the get_playthroughs_multi method."""
playthrough_id1 = stats_models.PlaythroughModel.create(
self.exp_id, 1, 'EarlyQuit', {}, [])
playthrough_id2 = stats_models.PlaythroughModel.create(
self.exp_id, 1, 'EarlyQuit', {}, [])
playthroughs = stats_services.get_playthroughs_multi(
[playthrough_id1, playthrough_id2])
self.assertEqual(playthroughs[0].exp_id, self.exp_id)
self.assertEqual(playthroughs[0].exp_version, 1)
self.assertEqual(playthroughs[0].issue_type, 'EarlyQuit')
self.assertEqual(playthroughs[0].issue_customization_args, {})
self.assertEqual(playthroughs[0].actions, [])
self.assertEqual(playthroughs[1].exp_id, self.exp_id)
self.assertEqual(playthroughs[1].exp_version, 1)
self.assertEqual(playthroughs[1].issue_type, 'EarlyQuit')
self.assertEqual(playthroughs[1].issue_customization_args, {})
self.assertEqual(playthroughs[1].actions, [])
def test_update_playthroughs_multi(self):
"""Test the update_playthroughs_multi method."""
playthrough_id1 = stats_models.PlaythroughModel.create(
self.exp_id, 1, 'EarlyQuit', {}, [])
playthrough_id2 = stats_models.PlaythroughModel.create(
self.exp_id, 1, 'EarlyQuit', {}, [])
playthroughs = stats_services.get_playthroughs_multi(
[playthrough_id1, playthrough_id2])
playthroughs[0].issue_type = 'MultipleIncorrectSubmissions'
playthroughs[1].issue_type = 'CyclicStateTransitions'
stats_services.update_playthroughs_multi(
[playthrough_id1, playthrough_id2], playthroughs)
playthroughs = stats_services.get_playthroughs_multi(
[playthrough_id1, playthrough_id2])
self.assertEqual(playthroughs[0].exp_id, self.exp_id)
self.assertEqual(playthroughs[0].exp_version, 1)
self.assertEqual(
playthroughs[0].issue_type, 'MultipleIncorrectSubmissions')
self.assertEqual(playthroughs[0].issue_customization_args, {})
self.assertEqual(playthroughs[0].actions, [])
self.assertEqual(playthroughs[1].exp_id, self.exp_id)
self.assertEqual(playthroughs[1].exp_version, 1)
self.assertEqual(playthroughs[1].issue_type, 'CyclicStateTransitions')
self.assertEqual(playthroughs[1].issue_customization_args, {})
self.assertEqual(playthroughs[1].actions, [])
def test_create_exp_issues_model(self):
"""Test the create_exp_issues_model method."""
exp_issues = stats_domain.ExplorationIssues(self.exp_id, 1, [])
stats_services.create_exp_issues_model(exp_issues)
exp_issues_instance = stats_models.ExplorationIssuesModel.get_model(
self.exp_id, 1)
self.assertEqual(exp_issues_instance.exp_id, self.exp_id)
self.assertEqual(exp_issues_instance.exp_version, 1)
self.assertEqual(exp_issues_instance.unresolved_issues, [])
def test_get_exp_issues_from_model(self):
"""Test the get_exp_issues_from_model method."""
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
exp_issues = stats_services.get_exp_issues_from_model(model)
self.assertEqual(exp_issues.exp_id, self.exp_id)
self.assertEqual(exp_issues.exp_version, 1)
self.assertEqual(exp_issues.unresolved_issues, [])
def test_get_exploration_stats_from_model(self):
"""Test the get_exploration_stats_from_model method."""
model = stats_models.ExplorationStatsModel.get(self.stats_model_id)
exploration_stats = stats_services.get_exploration_stats_from_model(
model)
self.assertEqual(exploration_stats.exp_id, 'exp_id1')
self.assertEqual(exploration_stats.exp_version, 1)
self.assertEqual(exploration_stats.num_starts_v1, 0)
self.assertEqual(exploration_stats.num_starts_v2, 0)
self.assertEqual(exploration_stats.num_actual_starts_v1, 0)
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v1, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
self.assertEqual(exploration_stats.state_stats_mapping, {})
def test_get_playthrough_from_model(self):
"""Test the get_playthrough_from_model method."""
model = stats_models.PlaythroughModel.get(self.playthrough_id)
playthrough = stats_services.get_playthrough_from_model(model)
self.assertEqual(playthrough.exp_id, 'exp_id1')
self.assertEqual(playthrough.exp_version, 1)
self.assertEqual(playthrough.issue_type, 'EarlyQuit')
self.assertEqual(playthrough.issue_customization_args, {})
self.assertEqual(playthrough.actions, [])
def test_get_exp_issues_by_id(self):
"""Test the get_exp_issues_by_id method."""
exp_issues = stats_services.get_exp_issues(self.exp_id, 1)
self.assertEqual(exp_issues.exp_id, self.exp_id)
self.assertEqual(exp_issues.exp_version, 1)
self.assertEqual(exp_issues.unresolved_issues, [])
def test_get_playthrough_by_id(self):
"""Test the get_playthrough_by_id method."""
playthrough = stats_services.get_playthrough_by_id(self.playthrough_id)
self.assertEqual(playthrough.exp_id, 'exp_id1')
self.assertEqual(playthrough.exp_version, 1)
self.assertEqual(playthrough.issue_type, 'EarlyQuit')
self.assertEqual(playthrough.issue_customization_args, {})
self.assertEqual(playthrough.actions, [])
def test_get_exploration_stats_by_id(self):
"""Test the get_exploration_stats_by_id method."""
exploration_stats = stats_services.get_exploration_stats_by_id(
self.exp_id, self.exp_version)
self.assertEqual(exploration_stats.exp_id, 'exp_id1')
self.assertEqual(exploration_stats.exp_version, 1)
self.assertEqual(exploration_stats.num_starts_v1, 0)
self.assertEqual(exploration_stats.num_starts_v2, 0)
self.assertEqual(exploration_stats.num_actual_starts_v1, 0)
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v1, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
self.assertEqual(exploration_stats.state_stats_mapping, {})
def test_create_stats_model(self):
"""Test the create_stats_model method."""
exploration_stats = stats_services.get_exploration_stats_by_id(
self.exp_id, self.exp_version)
exploration_stats.exp_version += 1
model_id = stats_services.create_stats_model(exploration_stats)
exploration_stats = stats_services.get_exploration_stats_by_id(
self.exp_id, self.exp_version + 1)
self.assertEqual(exploration_stats.exp_id, 'exp_id1')
self.assertEqual(exploration_stats.exp_version, 2)
self.assertEqual(exploration_stats.num_starts_v1, 0)
self.assertEqual(exploration_stats.num_starts_v2, 0)
self.assertEqual(exploration_stats.num_actual_starts_v1, 0)
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v1, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
self.assertEqual(exploration_stats.state_stats_mapping, {})
        # Test the create method with a different state_stats_mapping.
exploration_stats.state_stats_mapping = {
'Home': stats_domain.StateStats.create_default()
}
exploration_stats.exp_version += 1
model_id = stats_services.create_stats_model(exploration_stats)
model = stats_models.ExplorationStatsModel.get(model_id)
self.assertEqual(model.exp_id, 'exp_id1')
self.assertEqual(model.exp_version, 3)
self.assertEqual(exploration_stats.num_starts_v1, 0)
self.assertEqual(exploration_stats.num_starts_v2, 0)
self.assertEqual(exploration_stats.num_actual_starts_v1, 0)
self.assertEqual(exploration_stats.num_actual_starts_v2, 0)
self.assertEqual(exploration_stats.num_completions_v1, 0)
self.assertEqual(exploration_stats.num_completions_v2, 0)
self.assertEqual(
model.state_stats_mapping, {
'Home': {
'total_answers_count_v1': 0,
'total_answers_count_v2': 0,
'useful_feedback_count_v1': 0,
'useful_feedback_count_v2': 0,
'total_hit_count_v1': 0,
'total_hit_count_v2': 0,
'first_hit_count_v1': 0,
'first_hit_count_v2': 0,
'num_times_solution_viewed_v2': 0,
'num_completions_v1': 0,
'num_completions_v2': 0
}
})
def test_save_exp_issues_model_transactional(self):
"""Test the save_exp_issues_model_transactional method."""
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
exp_issues = stats_services.get_exp_issues_from_model(model)
exp_issues.unresolved_issues.append(
stats_domain.ExplorationIssue.from_dict({
'issue_type': 'EarlyQuit',
'issue_customization_args': {
'state_name': {
'value': 'state_name1'
},
'time_spent_in_exp_in_msecs': {
'value': 200
}
},
'playthrough_ids': ['playthrough_id1'],
'schema_version': 1,
'is_valid': True
}))
stats_services.save_exp_issues_model_transactional(exp_issues)
model = stats_models.ExplorationIssuesModel.get_model(self.exp_id, 1)
self.assertEqual(
model.unresolved_issues[0],
exp_issues.unresolved_issues[0].to_dict())
def test_save_stats_model_transactional(self):
"""Test the save_stats_model_transactional method."""
exploration_stats = stats_services.get_exploration_stats_by_id(
self.exp_id, self.exp_version)
exploration_stats.num_starts_v2 += 15
exploration_stats.num_actual_starts_v2 += 5
exploration_stats.num_completions_v2 += 2
stats_services.save_stats_model_transactional(exploration_stats)
exploration_stats = stats_services.get_exploration_stats_by_id(
self.exp_id, self.exp_version)
self.assertEqual(exploration_stats.num_starts_v2, 15)
self.assertEqual(exploration_stats.num_actual_starts_v2, 5)
self.assertEqual(exploration_stats.num_completions_v2, 2)
def test_get_exploration_stats_multi(self):
"""Test the get_exploration_stats_multi method."""
stats_models.ExplorationStatsModel.create(
'exp_id2', 2, 10, 0, 0, 0, 0, 0, {})
exp_version_references = [
exp_domain.ExpVersionReference(self.exp_id, self.exp_version),
exp_domain.ExpVersionReference('exp_id2', 2)]
exp_stats_list = stats_services.get_exploration_stats_multi(
exp_version_references)
self.assertEqual(len(exp_stats_list), 2)
self.assertEqual(exp_stats_list[0].exp_id, self.exp_id)
self.assertEqual(exp_stats_list[0].exp_version, self.exp_version)
self.assertEqual(exp_stats_list[1].exp_id, 'exp_id2')
self.assertEqual(exp_stats_list[1].exp_version, 2)
def test_update_exp_issues_for_new_exp_version_with_cyclic_state_transition(
self):
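        """Test that a cyclic state transition issue and its playthrough are
        updated when a state is renamed in a new exploration version.
        """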
# Create exploration object in datastore.
exp_id = 'exp_id'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_issues = stats_services.get_exp_issues(
exploration.id, exploration.version)
self.assertEqual(exp_issues.exp_version, 1)
self.assertEqual(exp_issues.unresolved_issues, [])
        # Create a playthrough instance for this version.
playthrough_id = stats_models.PlaythroughModel.create(
exp_id, exploration.version, 'CyclicStateTransitions',
{
'state_names': {
'value': ['Home', 'End', 'Home']
},
},
[{
'action_type': 'ExplorationStart',
'action_customization_args': {
'state_name': {
'value': 'Home'
}
},
'schema_version': 1
}])
exp_issue = stats_domain.ExplorationIssue.from_dict({
'issue_type': 'CyclicStateTransitions',
'issue_customization_args': {
'state_names': {
'value': ['Home', 'End', 'Home']
},
},
'playthrough_ids': [playthrough_id],
'schema_version': 1,
'is_valid': True
})
exp_issues.unresolved_issues = [exp_issue]
stats_services.save_exp_issues_model_transactional(exp_issues)
self.assertEqual(
exp_issues.unresolved_issues[0].issue_customization_args[
'state_names']['value'], ['Home', 'End', 'Home'])
playthrough_instance = stats_models.PlaythroughModel.get(
playthrough_id)
self.assertEqual(
playthrough_instance.issue_customization_args['state_names'][
'value'], ['Home', 'End', 'Home'])
self.assertEqual(
playthrough_instance.actions[0]['action_customization_args'][
'state_name']['value'], 'Home')
# Test renaming of states.
exploration.rename_state('Home', 'Renamed state')
exploration.version += 1
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
stats_services.update_exp_issues_for_new_exp_version(
exploration, exp_versions_diff, False)
exp_issues = stats_services.get_exp_issues(
exploration.id, exploration.version)
self.assertEqual(exp_issues.exp_version, 2)
self.assertEqual(
exp_issues.unresolved_issues[0].issue_customization_args[
'state_names']['value'], [
'Renamed state', 'End', 'Renamed state'])
playthrough_instance = stats_models.PlaythroughModel.get(
playthrough_id)
self.assertEqual(
playthrough_instance.issue_customization_args['state_names'][
'value'], ['Renamed state', 'End', 'Renamed state'])
self.assertEqual(
playthrough_instance.actions[0]['action_customization_args'][
'state_name']['value'],
'Renamed state')
def test_get_multiple_exploration_stats_by_version_with_invalid_exp_id(
self):
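        """Test that a list containing None is returned when the exploration
        id is invalid.
        """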
exp_stats = stats_services.get_multiple_exploration_stats_by_version(
'invalid_exp_id', [1])
self.assertEqual(exp_stats, [None])
def test_get_exploration_stats_multi_with_invalid_exp_id(self):
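        """Test that default stats domain objects are returned when no stats
        models exist for the given exploration ids.
        """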
exp_version_references = [
exp_domain.ExpVersionReference('exp_id_1', 1),
exp_domain.ExpVersionReference('exp_id_2', 2)]
exploration_stats_models = (
stats_models.ExplorationStatsModel.get_multi_stats_models(
exp_version_references))
self.assertEqual(exploration_stats_models, [None, None])
exp_stats_list = stats_services.get_exploration_stats_multi(
exp_version_references)
self.assertEqual(len(exp_stats_list), 2)
self.assertEqual(exp_stats_list[0].exp_id, 'exp_id_1')
self.assertEqual(exp_stats_list[0].exp_version, 1)
self.assertEqual(exp_stats_list[1].exp_id, 'exp_id_2')
self.assertEqual(exp_stats_list[1].exp_version, 2)
class MockInteractionAnswerSummariesAggregator(
stats_jobs_continuous.InteractionAnswerSummariesAggregator):
"""A modified InteractionAnswerSummariesAggregator that does not start
a new batch job when the previous one has finished.
"""
@classmethod
def _get_batch_job_manager_class(cls):
return MockInteractionAnswerSummariesMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
pass
class MockInteractionAnswerSummariesMRJobManager(
stats_jobs_continuous.InteractionAnswerSummariesMRJobManager):
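    """A modified InteractionAnswerSummariesMRJobManager that uses the mock
    aggregator as its continuous computation class.
    """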
@classmethod
def _get_continuous_computation_class(cls):
return MockInteractionAnswerSummariesAggregator
class EventLogEntryTests(test_utils.GenericTestBase):
"""Test for the event log creation."""
def test_create_events(self):
"""Basic test that makes sure there are no exceptions thrown."""
event_services.StartExplorationEventHandler.record(
'eid', 2, 'state', 'session', {}, feconf.PLAY_TYPE_NORMAL)
event_services.MaybeLeaveExplorationEventHandler.record(
'eid', 2, 'state', 'session', 27.2, {}, feconf.PLAY_TYPE_NORMAL)
class AnswerEventTests(test_utils.GenericTestBase):
"""Test recording new answer operations through events."""
SESSION_ID = 'SESSION_ID'
TIME_SPENT = 5.0
PARAMS = {}
def test_record_answer(self):
self.save_new_default_exploration('eid', '[email protected]')
exp = exp_fetchers.get_exploration_by_id('eid')
first_state_name = exp.init_state_name
second_state_name = 'State 2'
third_state_name = 'State 3'
exp_services.update_exploration('[email protected]', 'eid', [
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': first_state_name,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'TextInput',
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': second_state_name,
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_ADD_STATE,
'state_name': third_state_name,
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': second_state_name,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'TextInput',
}), exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': third_state_name,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': 'Continue',
})], 'Add new state')
exp = exp_fetchers.get_exploration_by_id('eid')
exp_version = exp.version
for state_name in [first_state_name, second_state_name]:
state_answers = stats_services.get_state_answers(
'eid', exp_version, state_name)
self.assertEqual(state_answers, None)
# Answer is a string.
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, first_state_name, 'TextInput', 0, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
self.PARAMS, 'answer1')
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, first_state_name, 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid2', self.TIME_SPENT,
self.PARAMS, 'answer1')
# Answer is a dict.
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, first_state_name, 'TextInput', 1, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
self.PARAMS, {'x': 1.0, 'y': 5.0})
# Answer is a number.
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, first_state_name, 'TextInput', 2, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
self.PARAMS, 10)
# Answer is a list of dicts.
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, first_state_name, 'TextInput', 3, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
self.PARAMS, [{'a': 'some', 'b': 'text'}, {'a': 1.0, 'c': 2.0}])
# Answer is a list.
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, second_state_name, 'TextInput', 2, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid3', self.TIME_SPENT,
self.PARAMS, [2, 4, 8])
# Answer is a unicode string.
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, second_state_name, 'TextInput', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid4', self.TIME_SPENT,
self.PARAMS, self.UNICODE_TEST_STRING)
# Answer is None (such as for Continue).
event_services.AnswerSubmissionEventHandler.record(
'eid', exp_version, third_state_name, 'Continue', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid5', self.TIME_SPENT,
self.PARAMS, None)
expected_submitted_answer_list1 = [{
'answer': 'answer1', 'time_spent_in_sec': 5.0,
'answer_group_index': 0, 'rule_spec_index': 0,
'classification_categorization': 'explicit', 'session_id': 'sid1',
'interaction_id': 'TextInput', 'params': {}
}, {
'answer': 'answer1', 'time_spent_in_sec': 5.0,
'answer_group_index': 0, 'rule_spec_index': 1,
'classification_categorization': 'explicit', 'session_id': 'sid2',
'interaction_id': 'TextInput', 'params': {}
}, {
'answer': {'x': 1.0, 'y': 5.0}, 'time_spent_in_sec': 5.0,
'answer_group_index': 1, 'rule_spec_index': 0,
'classification_categorization': 'explicit', 'session_id': 'sid1',
'interaction_id': 'TextInput', 'params': {}
}, {
'answer': 10, 'time_spent_in_sec': 5.0, 'answer_group_index': 2,
'rule_spec_index': 0, 'classification_categorization': 'explicit',
'session_id': 'sid1', 'interaction_id': 'TextInput', 'params': {}
}, {
'answer': [{'a': 'some', 'b': 'text'}, {'a': 1.0, 'c': 2.0}],
'time_spent_in_sec': 5.0, 'answer_group_index': 3,
'rule_spec_index': 0, 'classification_categorization': 'explicit',
'session_id': 'sid1', 'interaction_id': 'TextInput', 'params': {}
}]
expected_submitted_answer_list2 = [{
'answer': [2, 4, 8], 'time_spent_in_sec': 5.0,
'answer_group_index': 2, 'rule_spec_index': 0,
'classification_categorization': 'explicit', 'session_id': 'sid3',
'interaction_id': 'TextInput', 'params': {}
}, {
'answer': self.UNICODE_TEST_STRING, 'time_spent_in_sec': 5.0,
'answer_group_index': 1, 'rule_spec_index': 1,
'classification_categorization': 'explicit', 'session_id': 'sid4',
'interaction_id': 'TextInput', 'params': {}
}]
expected_submitted_answer_list3 = [{
'answer': None, 'time_spent_in_sec': 5.0, 'answer_group_index': 1,
'rule_spec_index': 1, 'classification_categorization': 'explicit',
'session_id': 'sid5', 'interaction_id': 'Continue', 'params': {}
}]
state_answers = stats_services.get_state_answers(
'eid', exp_version, first_state_name)
self.assertEqual(
state_answers.get_submitted_answer_dict_list(),
expected_submitted_answer_list1)
state_answers = stats_services.get_state_answers(
'eid', exp_version, second_state_name)
self.assertEqual(
state_answers.get_submitted_answer_dict_list(),
expected_submitted_answer_list2)
state_answers = stats_services.get_state_answers(
'eid', exp_version, third_state_name)
self.assertEqual(
state_answers.get_submitted_answer_dict_list(),
expected_submitted_answer_list3)
class RecordAnswerTests(test_utils.GenericTestBase):
"""Tests for functionality related to recording and retrieving answers."""
EXP_ID = 'exp_id0'
def setUp(self):
super(RecordAnswerTests, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
def test_record_answer_without_retrieving_it_first(self):
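        """Test that an answer can be recorded for a state before any answers
        have been retrieved for it.
        """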
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'first answer', 'TextInput', 0,
0, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 1.0))
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': 'first answer',
'time_spent_in_sec': 1.0,
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}])
def test_record_and_retrieve_single_answer(self):
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertIsNone(state_answers)
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'some text', 'TextInput', 0,
1, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 10.0))
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.exploration_id, 'exp_id0')
self.assertEqual(state_answers.exploration_version, 1)
self.assertEqual(
state_answers.state_name, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(state_answers.interaction_id, 'TextInput')
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': 'some text',
'time_spent_in_sec': 10.0,
'answer_group_index': 0,
'rule_spec_index': 1,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}])
def test_record_and_retrieve_single_answer_with_preexisting_entry(self):
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'first answer', 'TextInput', 0,
0, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 1.0))
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': 'first answer',
'time_spent_in_sec': 1.0,
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}])
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'some text', 'TextInput', 0,
1, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 10.0))
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.exploration_id, 'exp_id0')
self.assertEqual(state_answers.exploration_version, 1)
self.assertEqual(
state_answers.state_name, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(state_answers.interaction_id, 'TextInput')
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': 'first answer',
'time_spent_in_sec': 1.0,
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}, {
'answer': 'some text',
'time_spent_in_sec': 10.0,
'answer_group_index': 0,
'rule_spec_index': 1,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}])
def test_record_many_answers(self):
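        """Test recording a batch of answers for a single state."""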
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertIsNone(state_answers)
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer a', 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 10.0),
stats_domain.SubmittedAnswer(
'answer ccc', 'TextInput', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 3.0),
stats_domain.SubmittedAnswer(
'answer bbbbb', 'TextInput', 1, 0,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 7.5),
]
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list)
# The order of the answers returned depends on the size of the answers.
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.exploration_id, 'exp_id0')
self.assertEqual(state_answers.exploration_version, 1)
self.assertEqual(
state_answers.state_name, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(state_answers.interaction_id, 'TextInput')
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': 'answer a',
'time_spent_in_sec': 10.0,
'answer_group_index': 0,
'rule_spec_index': 1,
'classification_categorization': 'explicit',
'session_id': 'session_id_v',
'interaction_id': 'TextInput',
'params': {}
}, {
'answer': 'answer ccc',
'time_spent_in_sec': 3.0,
'answer_group_index': 1,
'rule_spec_index': 1,
'classification_categorization': 'explicit',
'session_id': 'session_id_v',
'interaction_id': 'TextInput',
'params': {}
}, {
'answer': 'answer bbbbb',
'time_spent_in_sec': 7.5,
'answer_group_index': 1,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'session_id_v',
'interaction_id': 'TextInput',
'params': {}
}])
def test_record_answers_exceeding_one_shard(self):
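        """Test that answers overflowing a single shard are stored across
        multiple shards and can still be retrieved together.
        """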
        # Use a smaller max answer list size so that fewer answers are needed
        # to exceed a shard.
with self.swap(
stats_models.StateAnswersModel, '_MAX_ANSWER_LIST_BYTE_SIZE',
100000):
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertIsNone(state_answers)
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer a', 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v',
10.0),
stats_domain.SubmittedAnswer(
'answer ccc', 'TextInput', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v',
3.0),
stats_domain.SubmittedAnswer(
'answer bbbbb', 'TextInput', 1, 0,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v',
7.5),
]
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list * 200)
# Verify that more than 1 shard was stored. The index shard
# (shard_id 0) is not included in the shard count.
master_model = stats_models.StateAnswersModel.get_master_model(
self.exploration.id, self.exploration.version,
self.exploration.init_state_name)
self.assertGreater(master_model.shard_count, 0)
# The order of the answers returned depends on the size of the
# answers.
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.exploration_id, 'exp_id0')
self.assertEqual(state_answers.exploration_version, 1)
self.assertEqual(
state_answers.state_name, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(state_answers.interaction_id, 'TextInput')
self.assertEqual(
len(state_answers.get_submitted_answer_dict_list()), 600)
def test_record_many_answers_with_preexisting_entry(self):
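        """Test recording a batch of answers for a state that already has a
        recorded answer.
        """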
stats_services.record_answer(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
stats_domain.SubmittedAnswer(
'1 answer', 'TextInput', 0,
0, exp_domain.EXPLICIT_CLASSIFICATION, {},
'a_session_id_val', 1.0))
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': '1 answer',
'time_spent_in_sec': 1.0,
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}])
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer aaa', 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 10.0),
stats_domain.SubmittedAnswer(
'answer ccccc', 'TextInput', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 3.0),
stats_domain.SubmittedAnswer(
'answer bbbbbbb', 'TextInput', 1, 0,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 7.5),
]
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list)
# The order of the answers returned depends on the size of the answers.
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(state_answers.exploration_id, 'exp_id0')
self.assertEqual(state_answers.exploration_version, 1)
self.assertEqual(
state_answers.state_name, feconf.DEFAULT_INIT_STATE_NAME)
self.assertEqual(state_answers.interaction_id, 'TextInput')
self.assertEqual(state_answers.get_submitted_answer_dict_list(), [{
'answer': '1 answer',
'time_spent_in_sec': 1.0,
'answer_group_index': 0,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'a_session_id_val',
'interaction_id': 'TextInput',
'params': {}
}, {
'answer': 'answer aaa',
'time_spent_in_sec': 10.0,
'answer_group_index': 0,
'rule_spec_index': 1,
'classification_categorization': 'explicit',
'session_id': 'session_id_v',
'interaction_id': 'TextInput',
'params': {}
}, {
'answer': 'answer ccccc',
'time_spent_in_sec': 3.0,
'answer_group_index': 1,
'rule_spec_index': 1,
'classification_categorization': 'explicit',
'session_id': 'session_id_v',
'interaction_id': 'TextInput',
'params': {}
}, {
'answer': 'answer bbbbbbb',
'time_spent_in_sec': 7.5,
'answer_group_index': 1,
'rule_spec_index': 0,
'classification_categorization': 'explicit',
'session_id': 'session_id_v',
'interaction_id': 'TextInput',
'params': {}
}])
class SampleAnswerTests(test_utils.GenericTestBase):
"""Tests for functionality related to retrieving sample answers."""
EXP_ID = 'exp_id0'
def setUp(self):
super(SampleAnswerTests, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.exploration = self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
def test_at_most_100_answers_returned_even_if_there_are_lots(self):
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer a', 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 10.0),
stats_domain.SubmittedAnswer(
'answer ccc', 'TextInput', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 3.0),
stats_domain.SubmittedAnswer(
'answer bbbbb', 'TextInput', 1, 0,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 7.5),
]
# Record 600 answers.
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list * 200)
sample_answers = stats_services.get_sample_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(len(sample_answers), 100)
def test_exactly_100_answers_returned_if_main_shard_has_100_answers(self):
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer a', 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 10.0)
]
# Record 100 answers.
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list * 100)
sample_answers = stats_services.get_sample_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(sample_answers, ['answer a'] * 100)
def test_all_answers_returned_if_main_shard_has_few_answers(self):
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer a', 'TextInput', 0, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 10.0),
stats_domain.SubmittedAnswer(
'answer bbbbb', 'TextInput', 1, 0,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v', 7.5),
]
# Record 2 answers.
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list)
sample_answers = stats_services.get_sample_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(sample_answers, ['answer a', 'answer bbbbb'])
def test_only_sample_answers_in_main_shard_returned(self):
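        """Test that sample answers are only drawn from the main (index) shard
        when answers overflow into additional shards.
        """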
# Use a smaller max answer list size so fewer answers are needed to
# exceed a shard.
with self.swap(
stats_models.StateAnswersModel, '_MAX_ANSWER_LIST_BYTE_SIZE',
15000):
state_answers = stats_services.get_state_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertIsNone(state_answers)
submitted_answer_list = [
stats_domain.SubmittedAnswer(
'answer ccc', 'TextInput', 1, 1,
exp_domain.EXPLICIT_CLASSIFICATION, {}, 'session_id_v',
3.0),
]
stats_services.record_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name, 'TextInput',
submitted_answer_list * 100)
# Verify more than 1 shard was stored. The index shard (shard_id 0)
# is not included in the shard count. Since a total of 100 answers were
# submitted, there must therefore be fewer than 100 answers in the
# index shard.
model = stats_models.StateAnswersModel.get('%s:%s:%s:%s' % (
self.exploration.id, str(self.exploration.version),
self.exploration.init_state_name, '0'))
self.assertEqual(model.shard_count, 1)
# Verify that the list of sample answers returned contains fewer than
# 100 answers, although a total of 100 answers were submitted.
sample_answers = stats_services.get_sample_answers(
self.EXP_ID, self.exploration.version,
self.exploration.init_state_name)
self.assertLess(len(sample_answers), 100)
def test_get_sample_answers_with_invalid_exp_id(self):
sample_answers = stats_services.get_sample_answers(
'invalid_exp_id', self.exploration.version,
self.exploration.init_state_name)
self.assertEqual(sample_answers, [])
# TODO(bhenning): Either add tests for multiple visualizations for one state or
# disallow stats from having multiple visualizations (no interactions currently
# seem to use more than one visualization ID).
# TODO(bhenning): Add tests for each possible visualization
# (TopAnswersByCategorization is not currently used yet by any interactions).
class AnswerVisualizationsTests(test_utils.GenericTestBase):
"""Tests for functionality related to retrieving visualization information
for answers.
"""
ALL_CC_MANAGERS_FOR_TESTS = [MockInteractionAnswerSummariesAggregator]
INIT_STATE_NAME = feconf.DEFAULT_INIT_STATE_NAME
TEXT_INPUT_EXP_ID = 'exp_id0'
SET_INPUT_EXP_ID = 'exp_id1'
DEFAULT_EXP_ID = 'exp_id2'
NEW_STATE_NAME = 'new state'
def _get_swap_context(self):
"""Substitutes the jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS
value with ALL_CC_MANAGERS_FOR_TESTS.
"""
return self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CC_MANAGERS_FOR_TESTS)
def _get_visualizations(
self, exp_id=TEXT_INPUT_EXP_ID, state_name=INIT_STATE_NAME):
"""Returns the visualizations info corresponding to the given
exploration id and state name.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
init_state = exploration.states[state_name]
return stats_services.get_visualizations_info(
exp_id, state_name, init_state.interaction.id)
def _record_answer(
self, answer, exp_id=TEXT_INPUT_EXP_ID, state_name=INIT_STATE_NAME):
"""Records the submitted answer corresponding to the given exploration
id and state name.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
interaction_id = exploration.states[state_name].interaction.id
event_services.AnswerSubmissionEventHandler.record(
exp_id, exploration.version, state_name, interaction_id, 0, 0,
exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', 10.0, {}, answer)
def _run_answer_summaries_aggregator(self):
"""Runs the MockInteractionAnswerSummariesAggregator."""
MockInteractionAnswerSummariesAggregator.start_computation()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)
def _rerun_answer_summaries_aggregator(self):
"""Reruns the MockInteractionAnswerSummariesAggregator."""
MockInteractionAnswerSummariesAggregator.stop_computation('a')
self._run_answer_summaries_aggregator()
def _rename_state(
self, new_state_name, exp_id=TEXT_INPUT_EXP_ID,
state_name=INIT_STATE_NAME):
"""Renames the state corresponding to the given exploration id
and state name.
"""
exp_services.update_exploration(
self.owner_id, exp_id, [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_RENAME_STATE,
'old_state_name': state_name,
'new_state_name': new_state_name
})], 'Update state name')
def _change_state_interaction_id(
self, interaction_id, exp_id=TEXT_INPUT_EXP_ID,
state_name=INIT_STATE_NAME):
"""Updates the state interaction id corresponding to the given
exploration id and state name.
"""
exp_services.update_exploration(
self.owner_id, exp_id, [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'new_value': interaction_id
})], 'Update state interaction ID')
def _change_state_content(
self, new_content, exp_id=TEXT_INPUT_EXP_ID,
state_name=INIT_STATE_NAME):
"""Updates the state content corresponding to the given exploration id
and state name.
"""
exp_services.update_exploration(
self.owner_id, exp_id, [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'state_name': state_name,
'property_name': exp_domain.STATE_PROPERTY_CONTENT,
'new_value': {
'content_id': 'content',
'html': new_content
}
})], 'Change content description')
def setUp(self):
super(AnswerVisualizationsTests, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.save_new_valid_exploration(
self.TEXT_INPUT_EXP_ID, self.owner_id, end_state_name='End')
self.save_new_valid_exploration(
self.SET_INPUT_EXP_ID, self.owner_id, end_state_name='End',
interaction_id='SetInput')
self.save_new_default_exploration(self.DEFAULT_EXP_ID, self.owner_id)
def test_no_vis_info_for_exp_with_no_interaction_id(self):
with self._get_swap_context():
visualizations = self._get_visualizations(
exp_id=self.DEFAULT_EXP_ID)
self.assertEqual(visualizations, [])
def test_no_vis_info_for_exp_with_no_answers_no_calculations(self):
with self._get_swap_context():
visualizations = self._get_visualizations()
self.assertEqual(visualizations, [])
def test_no_vis_info_for_exp_with_answer_no_completed_calculations(self):
with self._get_swap_context():
self._record_answer('Answer A')
visualizations = self._get_visualizations()
self.assertEqual(visualizations, [])
def test_no_vis_info_for_exp_with_no_answers_but_with_calculations(self):
with self._get_swap_context():
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations()
self.assertEqual(visualizations, [])
def test_has_vis_info_options_for_text_input_interaction(self):
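        """Test that the TextInput visualization exposes the expected column
        headers and title options.
        """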
with self._get_swap_context():
self._record_answer('Answer A')
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations()
            # There are two visualizations for TextInput: one for top answers
            # and one for top unresolved answers. The top unresolved answers
            # visualization is not shown as part of exploration stats.
self.assertEqual(len(visualizations), 1)
visualization = visualizations[0]
self.assertEqual(
visualization['options']['column_headers'], ['Answer', 'Count'])
self.assertIn('Top', visualization['options']['title'])
def test_has_vis_info_for_exp_with_answer_for_one_calculation(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations()
            # There are two visualizations for TextInput: one for top answers
            # and one for top unresolved answers. The top unresolved answers
            # visualization is not shown as part of exploration stats.
self.assertEqual(len(visualizations), 1)
visualization = visualizations[0]
self.assertEqual(visualization['id'], 'FrequencyTable')
self.assertEqual(
visualization['data'], [{
'answer': 'Answer A',
'frequency': 1
}])
def test_has_vis_info_for_exp_with_many_answers_for_one_calculation(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._record_answer('Answer A')
self._record_answer('Answer C')
self._record_answer('Answer B')
self._record_answer('Answer A')
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations()
            # There are two visualizations for TextInput: one for top answers
            # and one for top unresolved answers. The top unresolved answers
            # visualization is not shown as part of exploration stats.
self.assertEqual(len(visualizations), 1)
visualization = visualizations[0]
self.assertEqual(visualization['id'], 'FrequencyTable')
            # Ties appear in the same order in which they were submitted.
self.assertEqual(
visualization['data'], [{
'answer': 'Answer A',
'frequency': 3
}, {
'answer': 'Answer C',
'frequency': 1
}, {
'answer': 'Answer B',
'frequency': 1
}])
def test_has_vis_info_for_each_calculation_for_multi_calc_exp(self):
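        """Test that an interaction with multiple answer calculations (here
        SetInput) produces one visualization per calculation.
        """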
with self._get_swap_context():
self._record_answer(['A', 'B'], exp_id=self.SET_INPUT_EXP_ID)
self._record_answer(['C', 'A'], exp_id=self.SET_INPUT_EXP_ID)
self._record_answer(['A', 'B'], exp_id=self.SET_INPUT_EXP_ID)
self._record_answer(['A'], exp_id=self.SET_INPUT_EXP_ID)
self._record_answer(['A'], exp_id=self.SET_INPUT_EXP_ID)
self._record_answer(['A', 'B'], exp_id=self.SET_INPUT_EXP_ID)
self._run_answer_summaries_aggregator()
visualizations = sorted(
self._get_visualizations(exp_id=self.SET_INPUT_EXP_ID),
key=operator.itemgetter('data'))
self.assertEqual(len(visualizations), 2)
# Use options to distinguish between the two visualizations, since
# both are FrequencyTable.
top_answers_visualization = visualizations[0]
self.assertEqual(top_answers_visualization['id'], 'FrequencyTable')
self.assertEqual(
top_answers_visualization['options']['column_headers'],
['Answer', 'Count'])
self.assertEqual(
top_answers_visualization['data'], [{
'answer': ['A', 'B'],
'frequency': 3
}, {
'answer': ['A'],
'frequency': 2
}, {
'answer': ['C', 'A'],
'frequency': 1
}])
common_elements_visualization = visualizations[1]
self.assertEqual(
common_elements_visualization['id'], 'FrequencyTable')
self.assertEqual(
common_elements_visualization['options']['column_headers'],
['Element', 'Count'])
common_visualization_data = (
common_elements_visualization['data'])
self.assertEqual(
common_visualization_data, [{
'answer': 'A',
'frequency': 6
}, {
'answer': 'B',
'frequency': 3
}, {
'answer': 'C',
'frequency': 1
}])
def test_retrieves_latest_vis_info_with_rounds_of_calculations(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._record_answer('Answer C')
self._run_answer_summaries_aggregator()
# Submit a new answer and run the aggregator again.
self._record_answer('Answer A')
self._rerun_answer_summaries_aggregator()
visualizations = self._get_visualizations()
            # There are two visualizations for TextInput: one for top answers
            # and one for top unresolved answers. The top unresolved answers
            # visualization is not shown as part of exploration stats.
self.assertEqual(len(visualizations), 1)
visualization = visualizations[0]
# The latest data should include all submitted answers.
self.assertEqual(
visualization['data'], [{
'answer': 'Answer A',
'frequency': 2
}, {
'answer': 'Answer C',
'frequency': 1
}])
def test_retrieves_vis_info_across_multiple_exploration_versions(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._record_answer('Answer B')
# Change the exploration version and submit a new answer.
self._change_state_content('<p>New content</p>')
self._record_answer('Answer A')
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations()
            # There are two visualizations for TextInput: one for top answers
            # and one for top unresolved answers. The top unresolved answers
            # visualization is not shown as part of exploration stats.
self.assertEqual(len(visualizations), 1)
visualization = visualizations[0]
# The latest data should include all submitted answers.
self.assertEqual(
visualization['data'], [{
'answer': 'Answer A',
'frequency': 2
}, {
'answer': 'Answer B',
'frequency': 1
}])
def test_no_vis_info_for_exp_with_new_state_name_before_calculations(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._rename_state(self.NEW_STATE_NAME)
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations(
state_name=self.NEW_STATE_NAME)
self.assertEqual(visualizations, [])
def test_no_vis_info_for_exp_with_new_state_name_after_calculations(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._run_answer_summaries_aggregator()
self._rename_state(self.NEW_STATE_NAME)
visualizations = self._get_visualizations(
state_name=self.NEW_STATE_NAME)
self.assertEqual(visualizations, [])
def test_no_vis_info_for_exp_with_new_interaction_before_calculations(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._change_state_interaction_id('SetInput')
self._run_answer_summaries_aggregator()
visualizations = self._get_visualizations()
self.assertEqual(visualizations, [])
def test_no_vis_info_for_exp_with_new_interaction_after_calculations(self):
with self._get_swap_context():
self._record_answer('Answer A')
self._run_answer_summaries_aggregator()
self._change_state_interaction_id('SetInput')
visualizations = self._get_visualizations()
self.assertEqual(visualizations, [])
class StateAnswersStatisticsTest(test_utils.GenericTestBase):
"""Tests for functionality related to retrieving statistics for answers of a
particular state.
"""
STATE_NAMES = ['STATE A', 'STATE B', 'STATE C']
EXP_ID = 'exp_id'
def _get_top_state_answer_stats(
self, exp_id=EXP_ID, state_name=STATE_NAMES[0]):
"""Returns the top answer stats corresponding to the given exploration
        id and state name.
"""
return stats_services.get_top_state_answer_stats(exp_id, state_name)
def _get_top_state_unresolved_answer_stats(
self, exp_id=EXP_ID, state_name=STATE_NAMES[0]):
"""Returns the top unresolved answer stats corresponding to the given
        exploration id and state name.
"""
return stats_services.get_top_state_unresolved_answers(
exp_id, state_name)
def _get_top_state_answer_stats_multi(
self, state_names, exp_id=EXP_ID):
"""Returns the top answer stats corresponding to the given exploration
id and state names.
"""
return stats_services.get_top_state_answer_stats_multi(
exp_id, state_names)
def _record_answer(
self, answer, exp_id=EXP_ID, state_name=STATE_NAMES[0],
classification_category=exp_domain.EXPLICIT_CLASSIFICATION):
"""Records the submitted answer corresponding to the given interaction
id in an exploration.
"""
exploration = exp_fetchers.get_exploration_by_id(exp_id)
interaction_id = exploration.states[state_name].interaction.id
event_services.AnswerSubmissionEventHandler.record(
exp_id, exploration.version, state_name, interaction_id, 0, 0,
classification_category, 'sid1', 10.0, {}, answer)
def _run_answer_summaries_aggregator(self):
"""Runs the MockInteractionAnswerSummariesAggregator."""
MockInteractionAnswerSummariesAggregator.start_computation()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)
MockInteractionAnswerSummariesAggregator.stop_computation(
feconf.SYSTEM_COMMITTER_ID)
def setUp(self):
super(StateAnswersStatisticsTest, self).setUp()
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(self.owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.save_new_linear_exp_with_state_names_and_interactions(
self.EXP_ID, self.owner_id, self.STATE_NAMES, ['TextInput'])
def test_get_top_state_unresolved_answer_stats(self):
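        """Test retrieving the top unresolved answers for a state, and that
        answers drop out of the list once they are explicitly addressed.
        """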
self._record_answer(
'A', classification_category=exp_domain.EXPLICIT_CLASSIFICATION)
self._record_answer(
'B', classification_category=exp_domain.EXPLICIT_CLASSIFICATION)
self._record_answer(
'C', classification_category=exp_domain.STATISTICAL_CLASSIFICATION)
self._record_answer(
'A', classification_category=exp_domain.STATISTICAL_CLASSIFICATION)
self._record_answer(
'D',
classification_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION)
self._record_answer(
'E',
classification_category=exp_domain.DEFAULT_OUTCOME_CLASSIFICATION)
self._record_answer(
'D', classification_category=exp_domain.EXPLICIT_CLASSIFICATION)
self._run_answer_summaries_aggregator()
with self.swap(feconf, 'STATE_ANSWER_STATS_MIN_FREQUENCY', 1):
state_answers_stats = self._get_top_state_unresolved_answer_stats()
self.assertEqual(
state_answers_stats, [
{'answer': 'A', 'frequency': 2},
{'answer': 'C', 'frequency': 1},
{'answer': 'E', 'frequency': 1}
])
self._record_answer(
'A', classification_category=exp_domain.EXPLICIT_CLASSIFICATION)
self._record_answer(
'E', classification_category=exp_domain.EXPLICIT_CLASSIFICATION)
self._run_answer_summaries_aggregator()
with self.swap(feconf, 'STATE_ANSWER_STATS_MIN_FREQUENCY', 1):
state_answers_stats = self._get_top_state_unresolved_answer_stats()
self.assertEqual(
state_answers_stats, [
{'answer': 'C', 'frequency': 1}
])
def test_get_top_state_answer_stats(self):
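        """Test that answers below STATE_ANSWER_STATS_MIN_FREQUENCY are
        excluded from the top answer stats.
        """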
self._record_answer('A')
self._record_answer('B')
self._record_answer('A')
self._record_answer('A')
self._record_answer('B')
self._record_answer('C')
self._run_answer_summaries_aggregator()
with self.swap(feconf, 'STATE_ANSWER_STATS_MIN_FREQUENCY', 2):
state_answers_stats = self._get_top_state_answer_stats()
self.assertEqual(
state_answers_stats, [
{'answer': 'A', 'frequency': 3},
{'answer': 'B', 'frequency': 2},
# C is not included because min frequency is 2.
])
def test_get_top_state_answer_stats_multi(self):
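        """Test retrieving top answer stats for several states at once."""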
self._record_answer('A', state_name='STATE A')
self._record_answer('A', state_name='STATE A')
self._record_answer('B', state_name='STATE A')
self._record_answer(1, state_name='STATE B')
self._record_answer(1, state_name='STATE B')
self._record_answer(2, state_name='STATE B')
self._record_answer('X', state_name='STATE C')
self._record_answer('X', state_name='STATE C')
self._record_answer('Y', state_name='STATE C')
self._run_answer_summaries_aggregator()
with self.swap(feconf, 'STATE_ANSWER_STATS_MIN_FREQUENCY', 1):
state_answers_stats_multi = self._get_top_state_answer_stats_multi(
['STATE A', 'STATE B'])
self.assertEqual(sorted(state_answers_stats_multi), [
'STATE A',
'STATE B',
])
self.assertEqual(state_answers_stats_multi['STATE A'], [
{'answer': 'A', 'frequency': 2},
{'answer': 'B', 'frequency': 1},
])
self.assertEqual(state_answers_stats_multi['STATE B'], [
{'answer': 1, 'frequency': 2},
{'answer': 2, 'frequency': 1},
])
class LearnerAnswerDetailsServicesTest(test_utils.GenericTestBase):
"""Test for services related to learner answer details."""
def setUp(self):
super(LearnerAnswerDetailsServicesTest, self).setUp()
self.exp_id = 'exp_id1'
self.state_name = 'intro'
self.question_id = 'q_id_1'
self.interaction_id = 'TextInput'
self.state_reference_exploration = (
stats_models.LearnerAnswerDetailsModel.get_state_reference_for_exploration( #pylint: disable=line-too-long
self.exp_id, self.state_name))
self.state_reference_question = (
stats_models.LearnerAnswerDetailsModel.get_state_reference_for_question( #pylint: disable=line-too-long
self.question_id))
self.learner_answer_details_model_exploration = (
stats_models.LearnerAnswerDetailsModel.create_model_instance(
feconf.ENTITY_TYPE_EXPLORATION,
self.state_reference_exploration, self.interaction_id, [],
feconf.CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION, 0))
self.learner_answer_details_model_question = (
stats_models.LearnerAnswerDetailsModel.create_model_instance(
feconf.ENTITY_TYPE_QUESTION,
self.state_reference_question, self.interaction_id, [],
feconf.CURRENT_LEARNER_ANSWER_INFO_SCHEMA_VERSION, 0))
def test_get_state_reference_for_exp_raises_error_for_fake_exp_id(self):
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
with self.assertRaisesRegexp(
Exception, 'Entity .* not found'):
stats_services.get_state_reference_for_exploration(
'fake_exp', 'state_name')
def test_get_state_reference_for_exp_raises_error_for_invalid_state_name(
self):
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
exploration = self.save_new_default_exploration(
self.exp_id, owner_id)
self.assertEqual(exploration.states.keys(), ['Introduction'])
with self.assertRaisesRegexp(
utils.InvalidInputException,
'No state with the given state name was found'):
stats_services.get_state_reference_for_exploration(
self.exp_id, 'state_name')
def test_get_state_reference_for_exp_for_valid_exp_id_and_state_name(self):
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
user_services.create_new_user(owner_id, self.OWNER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
exploration = self.save_new_default_exploration(
self.exp_id, owner_id)
self.assertEqual(exploration.states.keys(), ['Introduction'])
state_reference = (
stats_services.get_state_reference_for_exploration(
self.exp_id, 'Introduction'))
self.assertEqual(state_reference, 'exp_id1:Introduction')
def test_get_state_reference_for_question_with_invalid_question_id(self):
with self.assertRaisesRegexp(
utils.InvalidInputException,
'No question with the given question id exists'):
stats_services.get_state_reference_for_question(
'fake_question_id')
def test_get_state_reference_for_question_with_valid_question_id(self):
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
editor_id = self.get_user_id_from_email(
self.EDITOR_EMAIL)
question_id = question_services.get_new_question_id()
question = self.save_new_question(
question_id, editor_id,
self._create_valid_question_data('ABC'), ['skill_1'])
self.assertNotEqual(question, None)
state_reference = (
stats_services.get_state_reference_for_question(question_id))
self.assertEqual(state_reference, question_id)
def test_update_learner_answer_details(self):
answer = 'This is my answer'
answer_details = 'This is my answer details'
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 0)
stats_services.record_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration,
self.interaction_id, answer, answer_details)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 1)
answer = 'My answer'
answer_details = 'My answer details'
stats_services.record_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration,
self.interaction_id, answer, answer_details)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 2)
def test_delete_learner_answer_info(self):
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 0)
answer = 'This is my answer'
answer_details = 'This is my answer details'
stats_services.record_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration,
self.interaction_id, answer, answer_details)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 1)
learner_answer_info_id = (
learner_answer_details.learner_answer_info_list[0].id)
stats_services.delete_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration,
learner_answer_info_id)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 0)
def test_delete_learner_answer_info_with_invalid_input(self):
with self.assertRaisesRegexp(
utils.InvalidInputException,
'No learner answer details found with the given state reference'):
stats_services.delete_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, 'expID:stateName', 'id_1')
def test_delete_learner_answer_info_with_unknown_learner_answer_info_id(
self):
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 0)
answer = 'This is my answer'
answer_details = 'This is my answer details'
stats_services.record_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration,
self.interaction_id, answer, answer_details)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 1)
learner_answer_info_id = 'id_1'
with self.assertRaisesRegexp(
Exception, 'Learner answer info with the given id not found'):
stats_services.delete_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION,
self.state_reference_exploration, learner_answer_info_id)
def test_update_state_reference(self):
new_state_reference = 'exp_id_2:state_name_2'
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertNotEqual(
learner_answer_details.state_reference, new_state_reference)
stats_services.update_state_reference(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration,
new_state_reference)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, new_state_reference)
self.assertEqual(
learner_answer_details.state_reference, new_state_reference)
def test_new_learner_answer_details_is_created(self):
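        """Test that recording answer info for an unseen state reference
        creates a new learner answer details entry.
        """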
state_reference = 'exp_id_2:state_name_2'
interaction_id = 'GraphInput'
answer = 'Hello World'
answer_details = 'Hello Programmer'
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, state_reference)
self.assertEqual(learner_answer_details, None)
stats_services.record_learner_answer_info(
feconf.ENTITY_TYPE_EXPLORATION, state_reference,
interaction_id, answer, answer_details)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, state_reference)
self.assertNotEqual(learner_answer_details, None)
self.assertEqual(
learner_answer_details.state_reference, state_reference)
self.assertEqual(learner_answer_details.interaction_id, interaction_id)
self.assertEqual(
len(learner_answer_details.learner_answer_info_list), 1)
def test_update_with_invalid_input_raises_exception(self):
with self.assertRaisesRegexp(
utils.InvalidInputException,
'No learner answer details found with the given state reference'):
stats_services.update_state_reference(
feconf.ENTITY_TYPE_EXPLORATION, 'expID:stateName',
'newexp:statename')
def test_delete_learner_answer_details_for_exploration_state(self):
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertNotEqual(learner_answer_details, None)
stats_services.delete_learner_answer_details_for_exploration_state(
self.exp_id, self.state_name)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_EXPLORATION, self.state_reference_exploration)
self.assertEqual(learner_answer_details, None)
def test_delete_learner_answer_details_for_question_state(self):
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_QUESTION, self.state_reference_question)
self.assertNotEqual(learner_answer_details, None)
stats_services.delete_learner_answer_details_for_question_state(
self.question_id)
learner_answer_details = stats_services.get_learner_answer_details(
feconf.ENTITY_TYPE_QUESTION, self.state_reference_question)
self.assertEqual(learner_answer_details, None)
| apache-2.0 | -6,070,531,319,868,672,000 | 43.937956 | 118 | 0.603003 | false |
skysports-digitalmedia/php-buildpack | tests/test_compile_helpers.py | 1 | 15810 |
import os
import os.path
import tempfile
import shutil
from nose.tools import eq_
from build_pack_utils import utils
from compile_helpers import setup_webdir_if_it_doesnt_exist
from compile_helpers import convert_php_extensions
from compile_helpers import is_web_app
from compile_helpers import find_stand_alone_app_to_run
from compile_helpers import load_binary_index
from compile_helpers import find_all_php_versions
from compile_helpers import find_all_php_extensions
from compile_helpers import validate_php_version
from compile_helpers import validate_php_extensions
from compile_helpers import setup_log_dir
class TestCompileHelpers(object):
def setUp(self):
self.build_dir = tempfile.mkdtemp(prefix='build-')
self.cache_dir = tempfile.mkdtemp(prefix='cache-')
os.rmdir(self.build_dir) # delete otherwise copytree complains
os.rmdir(self.cache_dir) # cache dir does not exist normally
def tearDown(self):
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
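        # Clean up any httpd/php download artifacts the tests left in TMPDIR.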
for name in os.listdir(os.environ['TMPDIR']):
if name.startswith('httpd-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
if name.startswith('php-') and name.endswith('.gz'):
os.remove(os.path.join(os.environ['TMPDIR'], name))
def assert_exists(self, *args):
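        """Assert that the path formed by joining *args exists."""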
eq_(True, os.path.exists(os.path.join(*args)),
"Does not exists: %s" % os.path.join(*args))
def test_setup_log_dir(self):
eq_(False, os.path.exists(os.path.join(self.build_dir, 'logs')))
setup_log_dir({
'BUILD_DIR': self.build_dir
})
self.assert_exists(self.build_dir, 'logs')
def test_setup_if_webdir_exists(self):
shutil.copytree('tests/data/app-1', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_exists(self):
shutil.copytree('tests/data/app-6', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(3, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_htdocs_does_not_exist_but_library_does(self):
shutil.copytree('tests/data/app-7', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, 'htdocs', 'library')
self.assert_exists(self.build_dir, 'htdocs', 'library', 'junk.php')
self.assert_exists(self.build_dir, 'lib')
self.assert_exists(self.build_dir, 'lib', 'test.php')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, 'manifest.yml')
eq_(4, len(os.listdir(self.build_dir)))
eq_(4, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist(self):
shutil.copytree('tests/data/app-2', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
eq_(2, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'htdocs',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'htdocs')
self.assert_exists(self.build_dir, 'htdocs', 'index.php')
self.assert_exists(self.build_dir, 'htdocs', 'info.php')
self.assert_exists(self.build_dir, 'htdocs',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'htdocs'))))
def test_setup_if_custom_webdir_does_not_exist_with_extensions(self):
shutil.copytree('tests/data/app-4', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEBDIR': 'public',
'LIBDIR': 'lib'
}))
self.assert_exists(self.build_dir, 'public')
self.assert_exists(self.build_dir, 'public', 'index.php')
self.assert_exists(self.build_dir, 'public', 'info.php')
self.assert_exists(self.build_dir, 'public',
'technical-difficulties1.jpg')
self.assert_exists(self.build_dir, '.bp-config')
self.assert_exists(self.build_dir, '.bp-config', 'options.json')
self.assert_exists(self.build_dir, '.bp-config', 'httpd', 'extra',
'httpd-remoteip.conf')
self.assert_exists(self.build_dir, '.bp')
self.assert_exists(self.build_dir, '.bp', 'logs')
self.assert_exists(self.build_dir, '.bp', 'logs', 'some.log')
self.assert_exists(self.build_dir, '.extensions')
self.assert_exists(self.build_dir, '.extensions', 'some-ext')
self.assert_exists(self.build_dir, '.extensions', 'some-ext',
'extension.py')
eq_(4, len(os.listdir(self.build_dir)))
eq_(3, len(os.listdir(os.path.join(self.build_dir, 'public'))))
def test_setup_if_htdocs_with_stand_alone_app(self):
shutil.copytree('tests/data/app-5', self.build_dir)
setup_webdir_if_it_doesnt_exist(utils.FormattedDict({
'BUILD_DIR': self.build_dir,
'WEB_SERVER': 'none'
}))
self.assert_exists(self.build_dir, 'app.php')
eq_(1, len(os.listdir(self.build_dir)))
def test_convert_php_extensions_54(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod1.so"\n'
'zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1', 'mod2', 'mod3'],
'ZEND_EXTENSIONS': ['zmod1', 'zmod2']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so\nextension=mod2.so\nextension=mod3.so',
ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"\nzend_extension="zmod2.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_54_none(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_none(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': [],
'ZEND_EXTENSIONS': []
}
convert_php_extensions(ctx)
eq_('', ctx['PHP_EXTENSIONS'])
eq_('', ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_54_one(self):
ctx = {
'PHP_VERSION': '5.4.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so', ctx['PHP_EXTENSIONS'])
eq_('zend_extension="@HOME/php/lib/php/extensions/'
'no-debug-non-zts-20100525/zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_convert_php_extensions_55_one(self):
ctx = {
'PHP_VERSION': '5.5.x',
'PHP_EXTENSIONS': ['mod1'],
'ZEND_EXTENSIONS': ['zmod1']
}
convert_php_extensions(ctx)
eq_('extension=mod1.so', ctx['PHP_EXTENSIONS'])
eq_('zend_extension="zmod1.so"',
ctx['ZEND_EXTENSIONS'])
def test_is_web_app(self):
ctx = {}
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'nginx'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'httpd'
eq_(True, is_web_app(ctx))
ctx['WEB_SERVER'] = 'none'
eq_(False, is_web_app(ctx))
def test_find_stand_alone_app_to_run_app_start_cmd(self):
ctx = {'APP_START_CMD': "echo 'Hello World!'"}
eq_("echo 'Hello World!'", find_stand_alone_app_to_run(ctx))
results = ('app.php', 'main.php', 'run.php', 'start.php', 'app.php')
for i, res in enumerate(results):
ctx = {'BUILD_DIR': 'tests/data/standalone/test%d' % (i + 1)}
eq_(res, find_stand_alone_app_to_run(ctx))
def test_load_binary_index(self):
ctx = {'BP_DIR': '.', 'STACK': 'lucid'}
json = load_binary_index(ctx)
assert json is not None
assert 'php' in json.keys()
eq_(9, len(json['php'].keys()))
def test_find_all_php_versions(self):
ctx = {'BP_DIR': '.', 'STACK': 'lucid'}
json = load_binary_index(ctx)
versions = find_all_php_versions(json)
eq_(9, len(versions))
eq_(3, len([v for v in versions if v.startswith('5.4.')]))
eq_(3, len([v for v in versions if v.startswith('5.5.')]))
def test_find_php_extensions(self):
ctx = {'BP_DIR': '.', 'STACK': 'lucid'}
json = load_binary_index(ctx)
exts = find_all_php_extensions(json)
eq_(9, len(exts.keys()))
tmp = exts[[key for key in exts.keys() if key.startswith('5.4')][0]]
assert 'amqp' in tmp
assert 'apc' in tmp
assert 'imap' in tmp
assert 'ldap' in tmp
assert 'phalcon' in tmp
assert 'pspell' in tmp
assert 'pdo_pgsql' in tmp
assert 'mailparse' in tmp
assert 'redis' in tmp
assert 'pgsql' in tmp
assert 'snmp' in tmp
assert 'cgi' not in tmp
assert 'cli' not in tmp
assert 'fpm' not in tmp
assert 'pear' not in tmp
def test_validate_php_version(self):
ctx = {
'ALL_PHP_VERSIONS': ['5.4.31', '5.4.30'],
'PHP_54_LATEST': '5.4.31',
'PHP_VERSION': '5.4.30'
}
validate_php_version(ctx)
eq_('5.4.30', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.4.29'
validate_php_version(ctx)
eq_('5.4.31', ctx['PHP_VERSION'])
ctx['PHP_VERSION'] = '5.4.30'
validate_php_version(ctx)
eq_('5.4.30', ctx['PHP_VERSION'])
def test_validate_php_extensions(self):
ctx = {
'ALL_PHP_EXTENSIONS': {
'5.4.31': ['curl', 'pgsql', 'snmp', 'phalcon']
},
'PHP_VERSION': '5.4.31',
'PHP_EXTENSIONS': ['curl', 'snmp']
}
validate_php_extensions(ctx)
eq_(2, len(ctx['PHP_EXTENSIONS']))
assert 'curl' in ctx['PHP_EXTENSIONS']
assert 'snmp' in ctx['PHP_EXTENSIONS']
ctx['PHP_EXTENSIONS'] = ['curl', 'pspell', 'imap', 'phalcon']
validate_php_extensions(ctx)
eq_(2, len(ctx['PHP_EXTENSIONS']))
assert 'curl' in ctx['PHP_EXTENSIONS']
assert 'phalcon' in ctx['PHP_EXTENSIONS']
| apache-2.0 | 2,697,824,671,140,327,000 | 42.315068 | 76 | 0.569892 | false |
omarios90/openshift-cartridge-python-3.5.1 | template/config/utils/excel_styles.py | 1 | 3230 | from xlwt import Font, Borders, Alignment, XFStyle, Pattern, Style
from .utils import EmptyClass
# PRE STYLES
# ====================================================
PreStyles = EmptyClass()
# title
PreStyles.title = Font()
PreStyles.title.height = 360
PreStyles.title.bold = True
# bold
PreStyles.bold = Font()
PreStyles.bold.bold = True
# bordered
PreStyles.bordered_full = Borders()
PreStyles.bordered_full.left = 6
PreStyles.bordered_full.right = 6
PreStyles.bordered_full.top = 6
PreStyles.bordered_full.bottom = 6
# border right
PreStyles.border_right = Borders()
PreStyles.border_right.right = 18
# alignment - center & center
PreStyles.align_center = Alignment()
PreStyles.align_center.horz = Alignment.HORZ_CENTER
PreStyles.align_center.vert = Alignment.VERT_CENTER
# alignment - right & center
PreStyles.align_right = Alignment()
PreStyles.align_right.horz = Alignment.HORZ_RIGHT
PreStyles.align_right.vert = Alignment.VERT_CENTER
# alignment - left & center
PreStyles.align_left = Alignment()
PreStyles.align_left.horz = Alignment.HORZ_LEFT
PreStyles.align_left.vert = Alignment.VERT_CENTER
# alignment - center & center & multi-line
PreStyles.align_center_multiline = Alignment()
PreStyles.align_center_multiline.horz = Alignment.HORZ_CENTER
PreStyles.align_center_multiline.vert = Alignment.VERT_CENTER
PreStyles.align_center_multiline.wrap = 1
# STYLES
# ====================================================
Styles = EmptyClass()
# title
Styles.title = XFStyle()
Styles.title.font = PreStyles.title
Styles.title.alignment = PreStyles.align_center
# bold
Styles.bold = XFStyle()
Styles.bold.font = PreStyles.bold
# bold & centered
Styles.bold_centered = XFStyle()
Styles.bold_centered.alignment = PreStyles.align_center
Styles.bold_centered.font = PreStyles.bold
# bold & right
Styles.bold_right = XFStyle()
Styles.bold_right.alignment = PreStyles.align_right
Styles.bold_right.font = PreStyles.bold
# bold & centered & bordered
Styles.bold_centered_bordered = XFStyle()
Styles.bold_centered_bordered.alignment = PreStyles.align_center
Styles.bold_centered_bordered.font = PreStyles.bold
Styles.bold_centered_bordered.borders = PreStyles.bordered_full
# normal & centered & bordered
Styles.normal_centered = XFStyle()
Styles.normal_centered.alignment = PreStyles.align_center
Styles.normal_centered.borders = PreStyles.bordered_full
# centered
Styles.centered = XFStyle()
Styles.centered.alignment = PreStyles.align_center
# centered multilined
Styles.centered_multilined = XFStyle()
Styles.centered_multilined.alignment = PreStyles.align_center_multiline
# centered border right
Styles.centered_border_right = XFStyle()
Styles.centered_border_right.alignment = PreStyles.align_center
Styles.centered_border_right.borders = PreStyles.border_right
# left
Styles.left = XFStyle()
Styles.left.alignment = PreStyles.align_left
# right
Styles.right = XFStyle()
Styles.right.alignment = PreStyles.align_right
# border right
Styles.border_right = XFStyle()
Styles.border_right.borders = PreStyles.border_right
# //////
formula_style = XFStyle()
patternF = Pattern()
patternF.pattern = Pattern.SOLID_PATTERN
patternF.pattern_fore_colour = Style.colour_map['yellow']
formula_style.pattern = patternF
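# A minimal usage sketch (runs only when this module is executed directly;
# assumes xlwt is installed, and the sheet/file names below are hypothetical):
if __name__ == '__main__':
    import xlwt
    book = xlwt.Workbook()
    sheet = book.add_sheet('demo')
    sheet.write(0, 0, 'Quarterly report', Styles.title)
    sheet.write(2, 1, 42, Styles.bold_centered_bordered)
    sheet.write(3, 1, 3.14, formula_style)
    book.save('demo.xls')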
| mit | 8,924,968,726,708,385,000 | 26.606838 | 71 | 0.755108 | false |
soarpenguin/python-scripts | terminal.py | 1 | 1467 | #!/usr/bin/env python
import os
def clrscr():
""" Clear screen and move cursor to 1,1 (upper left) pos. """
print '\033[2J\033[1;1H'
def clreol():
""" Erases from the current cursor position to the end of the current line. """
print '\033[K'
def delline():
""" Erases the entire current line. """
print '\033[2K'
def gotoxy(x, y):
""" Moves the cursor to the specified position. """
print "\033[%d;%dH" % (x, y)
def _ioctl_GWINSZ(fd): #### TABULATION FUNCTIONS
try: ### Discover terminal width
import fcntl
import termios
import struct
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return
return cr
def terminal_size(): ### decide on *some* terminal size
"""Return (lines, columns)."""
cr = _ioctl_GWINSZ(0) or _ioctl_GWINSZ(1) or _ioctl_GWINSZ(2) # try open fds
if not cr: # ...then ctty
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = _ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr: # env vars or finally defaults
try:
cr = os.environ['LINES'], os.environ['COLUMNS']
except:
cr = 25, 80
return int(cr[1]), int(cr[0]) # reverse rows, cols
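# A minimal usage sketch (Python 2 print syntax, matching the rest of this
# module; runs only when the file is executed directly):
if __name__ == '__main__':
    cols, lines = terminal_size()
    clrscr()
    gotoxy(1, 1)
    print 'terminal is %d columns x %d lines' % (cols, lines)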
| gpl-3.0 | 4,358,864,761,990,705,700 | 30.891304 | 83 | 0.498978 | false |
igordejanovic/parglare | tests/func/parsing/test_dynamic_disambiguation_filters.py | 1 | 3477 | import pytest # noqa
from parglare import GLRParser, Grammar, Parser, SHIFT, REDUCE
from parglare.exceptions import SRConflicts
grammar = r"""
E: E op_sum E {dynamic}
| E op_mul E {dynamic}
| number;
terminals
number: /\d+/;
op_sum: '+' {dynamic};
op_mul: '*' {dynamic};
"""
instr1 = '1 + 2 * 5 + 3'
instr2 = '1 * 2 + 5 * 3'
actions = {
'E': [lambda _, nodes: nodes[0] + nodes[2],
lambda _, nodes: nodes[0] * nodes[2],
lambda _, nodes: float(nodes[0])]
}
g = Grammar.from_string(grammar)
operations = []
def custom_disambiguation_filter(context, from_state, to_state, action,
production, subresults):
"""
    Give the operation that appears first in the input the lower priority.
    This demonstrates how the priority rule can change dynamically depending
    on the input, i.e. how disambiguation can be decided during parsing.
"""
global operations
    # At the start of parsing this function is called with action set to None
    # to give the strategy a chance to initialize.
if action is None:
operations = []
return
if action is SHIFT:
operation = context.token.symbol
else:
operation = context.token_ahead.symbol
actions = from_state.actions[operation]
if operation not in operations and operation.name != 'STOP':
operations.append(operation)
if action is SHIFT:
shifts = [a for a in actions if a.action is SHIFT]
if not shifts:
return False
reductions = [a for a in actions if a.action is REDUCE]
if not reductions:
return True
red_op = reductions[0].prod.rhs[1]
return operations.index(operation) > operations.index(red_op)
elif action is REDUCE:
# Current reduction operation
red_op = production.rhs[1]
# If operation ahead is STOP or is of less or equal priority -> reduce.
return ((operation not in operations)
or (operations.index(operation)
<= operations.index(red_op)))
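# Worked example of the filter above (values follow from instr1/instr2 defined
# at the top of this file): in instr1 = '1 + 2 * 5 + 3', '+' is encountered
# first and therefore gets the lower priority, so '*' binds tighter and the
# parse evaluates to 1 + (2 * 5) + 3 == 14. In instr2 = '1 * 2 + 5 * 3', '*'
# comes first, '+' binds tighter, and the result is 1 * (2 + 5) * 3 == 21.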
def test_dynamic_disambiguation():
"""
Test disambiguation determined at run-time based on the input.
This tests LR parsing.
"""
# This grammar is ambiguous if no prefer_shift strategy is used.
with pytest.raises(SRConflicts):
Parser(g, prefer_shifts=False)
# But if we provide dynamic disambiguation filter
# the conflicts can be handled at run-time.
p = Parser(g, actions=actions, prefer_shifts=False,
dynamic_filter=custom_disambiguation_filter)
# * operation will be of higher priority as it appears later in the stream.
result1 = p.parse(instr1)
assert result1 == 1 + (2 * 5) + 3
# + operation will be of higher priority here.
result2 = p.parse(instr2)
assert result2 == 1 * (2 + 5) * 3
def test_dynamic_disambiguation_glr():
"""
Test disambiguation determined at run-time based on the input.
This tests GLR parsing.
"""
p = GLRParser(g, actions=actions,
dynamic_filter=custom_disambiguation_filter)
# * operation will be of higher priority as it appears later in the stream.
result1 = p.parse(instr1)
assert len(result1) == 1
assert p.call_actions(result1[0]) == 1 + (2 * 5) + 3
# + operation will be of higher priority here.
result2 = p.parse(instr2)
assert len(result2) == 1
assert p.call_actions(result2[0]) == 1 * (2 + 5) * 3
| mit | -6,702,298,791,674,917,000 | 28.218487 | 79 | 0.633592 | false |
gelbander/retain24wrapper | retain24wrapper/retain24wrapper.py | 1 | 5339 | # -*- coding: utf-8 -*-
import time
from tempfile import NamedTemporaryFile
from xml.etree import cElementTree as ET
from xml.etree.cElementTree import XML
from dicttoxml import dicttoxml
import requests
import xmltodict
ACTIONS = {}
ACTIONS['GET_PROVIDERS'] = {'TA_ACTION': '5-45103'}
ACTIONS['ISSUE'] = {'TA_ACTION': '5-45102'}
ACTIONS['VALIDATE'] = {'TA_ACTION': '5-43101'}
class Provider(object):
def __init__(self, body):
""" Populate an Provider instance base on body data. """
for k, v in body.iteritems():
self.__setattr__(k.replace('@','').lower(), v)
def __repr__(self):
""" Printable representation. """
return ' - '.join([self.name, self.id])
class Retain24Wrapper(object):
"""
Usage::
>>> from retain24wrapper import Retain24Wrapper
>>> r = Retain24Wrapper(base_url, certificate_path)
>>> providers = r.get_providers()
[H&M - 001, Lindex - 002, ICA - 003]
>>> r.issue_valuable(args)
OrderedDict([(u'MSISDN', u'00467311122233'), ... (u'STATUS', u'OK')])
>>> r.validate_valuable(args)
OrderedDict([(u'CPNINFO'...
"""
def __init__(self, base_url, certificate_path):
""" Setup the retain wrapper object. """
self.base_url = base_url
self.certificate_path = certificate_path
self.providers = []
def parse_response(self, resp):
"""Parse response data into a dictionary."""
return xmltodict.parse(resp.content)['TICKETANYWHERE']['COUPON']['RESPONSE']
def populate_xml(self, body, **kwargs):
""" Prepare the xml data to be sent to the api"""
tmp = NamedTemporaryFile(mode='w+b', suffix='xml', delete=True)
root = ET.Element("TICKETANYWHERE")
coupon = ET.SubElement(root, "COUPON", {'VER': '1.0'})
body_xml = XML(dicttoxml(body, root=False, attr_type=False))
if (kwargs.get('body_attrs')):
body_xml.attrib = kwargs.get('body_attrs')
coupon.append(body_xml)
tmp.write('<?xml version="1.0" encoding="ISO-8859-1" ?>')
ET.ElementTree(root).write(tmp)
tmp.seek(0)
file = tmp.read()
tmp.close()
return file
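    # Sketch of the request body populate_xml produces for an ISSUE call
    # (element order depends on dicttoxml and all values are hypothetical):
    #
    #   <?xml version="1.0" encoding="ISO-8859-1" ?>
    #   <TICKETANYWHERE>
    #     <COUPON VER="1.0">
    #       <SEND>
    #         <TEMPLATE>001</TEMPLATE>
    #         <QTY>10000</QTY>
    #         <MSISDN>00467311122233</MSISDN>
    #         ...
    #       </SEND>
    #     </COUPON>
    #   </TICKETANYWHERE>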
def validate_receipt(self, resp):
""" Parse the issue and send response and checks for errors."""
receipt = self.parse_response(resp)['RECEIPT']
if (receipt['STATUS'] == 'ERROR'):
raise ValueError('ERRORCODE: {error_code} - {message}'.format(
error_code=receipt['ERRORCODE'],
message=receipt['MESSAGE']
))
return receipt
def get_providers(self):
""" Cet currently available providers.
:return: self.providers: A list with available providers.
"""
resp = requests.get(self.base_url, params=ACTIONS['GET_PROVIDERS'], cert=self.certificate_path, verify=True, stream=True)
for template in self.parse_response(resp)['TEMPLATELIST']['TEMPLATE']:
self.providers.append(Provider(template))
return self.providers
def issue_valuable(self, template_id, qty, msisdn, **kwargs):
""" Generate a coupon (aka valuable).
        :param template_id: The retain24 id for a client/organization
        :param qty: The value of the coupon; 100 = 1 SEK
        :param msisdn: Customer id, i.e. the customer's phone number.
        :param email_address: (optional) Customer's email.
        :param sms_text: (optional) SMS text.
        :param email_text: (optional) Email text.
        :param send_date: (optional) Date sent.
:return receipt: Receipt
"""
email_address = kwargs.get('email_address', 'None')
sms_text = kwargs.get('sms_text', 'None')
email_text = kwargs.get('email_text', 'None')
send_date = kwargs.get('send_date', time.strftime('%Y-%m-%d %H:%m'))
obj = {
"SEND": {
"TEMPLATE": template_id,
"QTY": qty,
"MSISDN": msisdn,
"EMAIL_ADDRESS": email_address,
"SMS_TEXT": sms_text,
"EMAIL_TEXT": email_text,
"SEND_DATE": send_date,
}
}
xml = self.populate_xml(obj)
resp = requests.post(
self.base_url,
data=xml,
params=ACTIONS['ISSUE'],
cert=self.certificate_path,
verify=True,
stream=True
)
receipt = self.validate_receipt(resp)
return receipt
def validate_valuable(self, msisdn, pin, multicode):
""" Valudate a valuable aka. coupon.
:param multicode: The unique code for a valuable.
:param pin: Pincode, set to empty string if provider doesnt need it.
:param msisdn: Customer id also customers phone number.
"""
obj = {
"VALIDATE": {
"MSISDN": msisdn,
"PIN": pin,
"MULTICODE": multicode
}
}
xml = self.populate_xml(body=obj, body_attrs={'TYPE': 'STANDARD'})
resp = requests.post(
self.base_url,
data=xml,
params=ACTIONS['VALIDATE'],
cert=self.certificate_path,
verify=True,
stream=True
)
return self.parse_response(resp)
| mit | -3,539,120,547,608,042,500 | 30.222222 | 129 | 0.567335 | false |
toddpalino/kafka-tools | kafka/tools/protocol/responses/leader_and_isr_v0.py | 1 | 1199 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from kafka.tools.protocol.responses import BaseResponse
class LeaderAndIsrV0Response(BaseResponse):
schema = [
{'name': 'error', 'type': 'int16'},
{'name': 'partitions',
'type': 'array',
'item_type': [
{'name': 'topic', 'type': 'string'},
{'name': 'partition', 'type': 'int32'},
{'name': 'error', 'type': 'int16'},
]},
]
| apache-2.0 | -8,818,196,669,233,564,000 | 37.677419 | 62 | 0.674729 | false |
chinfeng/gumpy | huacaya/auth/endpoint.py | 1 | 13593 | # -*- coding: utf-8 -*-
__author__ = 'chinfeng'
import os
import uuid
import json
import datetime
import tornado.web
from tornado.web import HTTPError
from tornado.escape import json_decode
try:
from urllib import urlencode
from urlparse import urlsplit, urlunsplit
except ImportError:
from urllib.parse import urlencode, urlsplit, urlunsplit
import logging
logger = logging.getLogger(__name__)
from .auth import AuthorizationError
def json_default(obj):
if isinstance(obj, datetime.datetime):
return str(obj)
else:
return obj
class BaseHandler(tornado.web.RequestHandler):
def initialize(self, **kwds):
self._auth_server = kwds.get('auth_server', None)
self._auth_provider = kwds.get('auth_provider', None)
self._current_user = None
def prepare(self):
if all((
self.request.method.upper() != 'GET',
            self.request.headers.get('content-type', '').startswith('application/json'),
)):
self.json_data = json_decode(self.request.body)
else:
self.json_data = None
def get_argument(self, name, default=None, strip=True):
if self.json_data:
arg = self.json_data.get(name, default)
return arg.strip() if strip and isinstance(arg, str) else arg
else:
return tornado.web.RequestHandler.get_argument(self, name, default, strip)
def write_error(self, status_code, **kwds):
try:
self.write(kwds)
except TypeError:
tornado.web.RequestHandler.write_error(self, status_code, **kwds)
def get_current_user(self):
if not self._current_user:
account_raw = self.get_secure_cookie('account', None)
self._current_user = json_decode(account_raw) if account_raw else None
return self._current_user
def get_access_token(self):
access_token = self.get_secure_cookie('access_token', None)
if not access_token:
bearer_str = self.request.headers.get('Authorization', None)
if bearer_str:
if bearer_str.startswith('Bearer '):
return bearer_str[7:]
return self.get_argument('access_token', None)
else:
return access_token.decode('utf-8')
class RedirectBaseHandler(BaseHandler):
def send_redirect(self, redirect_uri, args):
self.clear()
url_parts = list(urlsplit(redirect_uri))
url_parts[3] = '&'.join((urlencode({k: v for k, v in args.items() if v is not None}), url_parts[3])).strip('&')
self.redirect(urlunsplit(url_parts))
def send_invalid_request_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='invalid_request', error_description='The request is missing a required parameter.',
))
def send_unsupported_response_type_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='unsupported_response_type',
error_description='The authorization server does not support obtaining an authorization code using this method.',
))
def send_unauthorized_client_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='unauthorized_client',
error_description='The client is not authorized to request an authorization code using this method.',
))
def send_access_denied_error(self, redirect_uri, state=None):
self.send_redirect(redirect_uri, dict(
state=state, error='access_denied',
error_description='The resource owner or authorization server denied the request.',
))
class MainHandler(BaseHandler):
__route__ = r'/?'
def get(self):
self.redirect('/auth/index.html')
class SignUpHandler(BaseHandler):
__route__ = r'/signup'
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self._auth_server.register_account(data)
token_data = self._auth_provider.password_grant(data['username'], data, 'me, all')
logger.debug('access_token: {0}'.format(token_data['access_token']))
self.set_secure_cookie('access_token', token_data['access_token'])
self.set_secure_cookie('refresh_token', token_data['refresh_token'])
self.write(token_data)
class RevokeTokenHandler(BaseHandler):
""" TODO: demonstration without any permission check for now """
__route__ = r'/revoke'
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
token = data.get('token')
self._auth_server.revoke_token(token)
self.write({})
class AccountListHandler(BaseHandler):
__route__ = r'/accounts'
def get(self):
""" # TODO: demonstration with simple access control fornow """
if self.request.remote_ip == '127.0.0.1':
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(list(self._auth_server.get_accounts())))
else:
self.send_error(403)
class TokenListHandler(BaseHandler):
__route__ = r'/tokens'
def get(self):
""" # TODO: demonstration with simple access control fornow """
if self.request.remote_ip == '127.0.0.1':
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(list(self._auth_server.get_tokens()), default=json_default))
else:
self.send_error(403)
class ClientListHandler(BaseHandler):
__route__ = r'/clients'
def get(self):
""" # TODO: demonstration with simple access control fornow """
if self.request.remote_ip == '127.0.0.1':
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(list(self._auth_server.get_clients())))
else:
self.send_error(403)
class AccountInfoHandler(BaseHandler):
__route__ = r'/me'
def get(self):
token = self.get_access_token()
logger.debug('get_access_token: {0}'.format(token))
if token and self._auth_server.verify_scope(token, 'me'):
account = self._auth_server.get_account_by_token(token)
if account:
account.pop('password', None)
self.write(account)
else:
self.send_error(
500, error='server_error',
error_description='account not found',
)
else:
self.set_header(
'WWW-Authenticate',
'Bearer realm="{0}", error="{1}"'.format(
'example', 'access_denied',
)
)
self.set_status(401, 'Unauthorized')
class SignInHandler(BaseHandler):
__route__ = r'/signin'
def post(self):
account = self._auth_server.find_account(self.json_data)
if account:
del account['password']
self.set_secure_cookie('account', json.dumps(account, default=json_default))
self.write({'sign_in': 'success'})
class AuthorizeHandler(RedirectBaseHandler):
__route__ = r'/authorize'
__sign_in_endpoint__ = r'/signin.html'
__auth_endpoint__ = r'/auth.html'
__default_redirect__ = r'/default_callback'
def get(self):
# https://tools.ietf.org/html/rfc6749#section-4.1.1
# https://tools.ietf.org/html/rfc6749#section-4.2.1
        # There is no default redirect callback mechanism yet, so redirect_uri is a required parameter
redirect_uri = self.get_argument('redirect_uri', None)
response_type = self.get_argument('response_type', None)
client_id = self.get_argument('client_id', None)
scope = self.get_argument('scope', None)
state = self.get_argument('state', None)
if not (redirect_uri and response_type and client_id):
self.send_invalid_request_error(redirect_uri or self.__default_redirect__, state)
elif response_type not in ('code', 'token'):
self.send_unsupported_response_type_error(redirect_uri, state)
elif not self._auth_server.has_client_id(client_id):
self.send_unauthorized_client_error(redirect_uri, state)
else:
self.send_redirect(self.__sign_in_endpoint__, dict(
response_type=response_type, client_id=client_id,
redirect_uri=redirect_uri, state=state, scope=scope,
))
def post(self):
# https://tools.ietf.org/html/rfc6749#section-4.1.1
# https://tools.ietf.org/html/rfc6749#section-4.2.1
        # There is no default redirect callback mechanism yet, so redirect_uri is a required parameter
redirect_uri = self.get_argument('redirect_uri', None)
response_type = self.get_argument('response_type', None)
client_id = self.get_argument('client_id', None)
state = self.get_argument('state', None)
scope = self.get_argument('scope', None)
agreed = self.get_argument('agreed', 0)
account = self.get_current_user()
if not (redirect_uri and response_type and client_id):
self.send_invalid_request_error(redirect_uri or self.__default_redirect__, state)
elif not agreed:
self.send_access_denied_error(redirect_uri, state)
elif response_type == 'code':
# https://tools.ietf.org/html/rfc6749#section-4.1.1
            # There is no default redirect callback mechanism yet, so redirect_uri is a required parameter
if self._auth_server.has_client_id(client_id):
self.send_redirect(redirect_uri, dict(
state=state,
code=self._auth_provider.authorization_request(account['username'], client_id, redirect_uri, scope)
))
else:
self.send_unauthorized_client_error(redirect_uri, state)
elif response_type == 'token':
# https://tools.ietf.org/html/rfc6749#section-4.2.1
            # There is no default redirect callback mechanism yet, so redirect_uri is a required parameter
if self._auth_server.has_client_id(client_id):
access_token_data = self._auth_provider.implicit_grant(account['username'], client_id, redirect_uri, scope)
self.send_redirect(redirect_uri, dict(
state=state, expires_in=access_token_data['expires_in'],
token_type=access_token_data['token_type'], access_token=access_token_data['access_token'],
))
else:
self.send_unauthorized_client_error(redirect_uri, state)
else:
self.send_unsupported_response_type_error(redirect_uri, state)
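# Sketch of the authorization-code flow served by AuthorizeHandler (client id,
# redirect URI and state below are hypothetical):
#
#   GET /authorize?response_type=code&client_id=my-client
#       &redirect_uri=https%3A%2F%2Fapp.example.com%2Fcb&scope=me&state=xyz
#
# redirects the browser to /signin.html with the same parameters; after the
# signed-in user POSTs back with agreed=1, the browser is sent to redirect_uri
# with ?code=...&state=xyz (for response_type=token the access_token and
# expiry are appended to the query string instead).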
class GrantHandler(BaseHandler):
__route__ = r'/grant'
def post(self):
grant_type = self.get_argument('grant_type', None)
if grant_type == 'authorization_code':
authorization_code = self.get_argument('code', None)
client_id = self.get_argument('client_id', None)
redirect_uri = self.get_argument('redirect_uri', None)
try:
self.write(
self._auth_provider.authorization_code_grant(
authorization_code, client_id, redirect_uri
)
)
except BaseException as err:
self.send_error(400, **err.args[0])
elif grant_type == 'refresh_token':
# Refreshing an Access Token
# https://tools.ietf.org/html/rfc6749#section-6
try:
self.write(
self._auth_provider.refresh_token_grant(self.get_argument('refresh_token', None))
)
except BaseException as err:
self.send_error(400, **err.args[0])
elif grant_type == 'password':
username = self.get_argument('username', None)
password = self.get_argument('password', None)
scope = self.get_argument('scope', None)
try:
token_data = self._auth_server.password_grant(
username, {'username': username, 'password': password}, scope)
self.write(token_data)
except AuthorizationError:
self.send_error(400, error='invalid_request')
elif grant_type:
self.send_error(
400, error='unsupported_grant_type',
error_description='The authorization grant type is not supported by the authorization server.',
)
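# Sketch of a token exchange against GrantHandler (values are hypothetical;
# BaseHandler.prepare accepts JSON bodies when Content-Type is application/json):
#
#   POST /grant
#   Content-Type: application/json
#
#   {"grant_type": "authorization_code",
#    "code": "<code obtained from /authorize>",
#    "client_id": "my-client",
#    "redirect_uri": "https://app.example.com/cb"}
#
# On success the handler writes whatever the provider's authorization_code_grant
# returns (typically access_token, token_type, expires_in and refresh_token).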
class EndpointApplication(tornado.web.Application):
def __init__(self, auth_server, auth_provider):
self._auth_server = auth_server
self._auth_provider = auth_provider
super(self.__class__, self).__init__(
self.get_handlers(auth_server=auth_server, auth_provider=auth_provider),
cookie_secret=uuid.uuid4().hex
)
def get_handlers(self, **kwds):
handlers = [
MainHandler, SignUpHandler, AuthorizeHandler, GrantHandler, AccountInfoHandler,
RevokeTokenHandler, AccountListHandler, TokenListHandler, ClientListHandler, SignInHandler,
]
for handler in handlers:
yield (handler.__route__, handler, kwds)
static_path = os.path.join(os.path.dirname(__file__), 'static')
yield (r'/(.*)', tornado.web.StaticFileHandler, dict(path=static_path))
| lgpl-3.0 | -4,256,504,983,164,449,000 | 39.875758 | 127 | 0.602046 | false |
kobronson/cs-voltdb | tests/scripts/examples/sql_coverage/partial-covering-schema.py | 1 | 1941 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2013 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
{
"PARTIAL_COVERING_TREE" : {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("A1", FastSerializer.VOLTTYPE_INTEGER),
("A2", FastSerializer.VOLTTYPE_INTEGER),
("A3", FastSerializer.VOLTTYPE_INTEGER),
("A4", FastSerializer.VOLTTYPE_INTEGER)),
"partitions": (),
"indexes": ("ID")
},
"PARTIAL_COVERING_HASH" : {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("A1", FastSerializer.VOLTTYPE_INTEGER),
("A2", FastSerializer.VOLTTYPE_INTEGER),
("A3", FastSerializer.VOLTTYPE_INTEGER),
("A4", FastSerializer.VOLTTYPE_INTEGER)),
"partitions": (),
"indexes": ("ID")
}
}
| agpl-3.0 | -6,313,715,119,694,214,000 | 42.133333 | 72 | 0.655332 | false |
markovmodel/thermotools | test/test_util.py | 1 | 10124 | # This file is part of thermotools.
#
# Copyright 2015, 2016 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# thermotools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import thermotools.util as util
import numpy as np
from nose.tools import assert_true
from numpy.testing import assert_array_equal, assert_almost_equal
####################################################################################################
# sorting
####################################################################################################
def test_mixed_sort_reverse():
# testing against numpy.sort()
x = np.ascontiguousarray(np.arange(1000)[::-1].astype(np.float64))
y = np.sort(x)
util.mixed_sort(x)
assert_array_equal(x, y)
def test_mixed_sort_random():
# testing against numpy.sort()
x = np.random.rand(1000).astype(np.float64)
y = np.sort(x)
util.mixed_sort(x)
assert_array_equal(x, y)
####################################################################################################
# direct summation schemes
####################################################################################################
def test_kahan_summation():
# np.sum() fails for this array when unsorted
array = np.array([1.0E-8, 1.0, 1.0E+8] * 100000, dtype=np.float64)
result = util.kahan_summation(array, sort_array=False)
assert_true(result == 10000000100000.001)
result = util.kahan_summation(array, sort_array=True, inplace=False)
assert_true(result == 10000000100000.001)
result = util.kahan_summation(array, sort_array=True, inplace=True)
assert_true(result == 10000000100000.001)
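# A sketch of standard Kahan (compensated) summation, the scheme the function
# under test is expected to follow:
#
#   s, c = 0.0, 0.0
#   for x in array:
#       y = x - c
#       t = s + y
#       c = (t - s) - y
#       s = t
#
# The compensation term c is what lets the 1.0E-8 contributions survive next
# to the 1.0E+8 terms in the test above.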
####################################################################################################
# logspace summation schemes
####################################################################################################
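# The logsumexp tests below rely on the standard max-shift identity:
#   log(sum_i exp(x_i)) = max(x) + log(sum_i exp(x_i - max(x)))
# which keeps the exponentials in a representable range; e.g. for N equal
# entries the result is x + log(N), matching test_logsumexp_zeros.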
def test_logsumexp_zeros():
N = 10000
data = np.zeros(shape=(N,), dtype=np.float64)
assert_almost_equal(util.logsumexp(data, inplace=False), np.log(N), decimal=15)
assert_almost_equal(util.logsumexp(-data, inplace=False), np.log(N), decimal=15)
def test_logsumexp_converged_geometric_series():
data = np.ascontiguousarray(np.arange(10000)[::-1].astype(np.float64))
assert_almost_equal(
util.logsumexp(-data, inplace=False, sort_array=False, use_kahan=False),
0.45867514538708193, decimal=15)
assert_almost_equal(
util.logsumexp(-data, inplace=False, sort_array=False, use_kahan=True),
0.45867514538708193, decimal=15)
assert_almost_equal(
util.logsumexp(-data, inplace=False, sort_array=True, use_kahan=False),
0.45867514538708193, decimal=15)
assert_almost_equal(
util.logsumexp(-data, inplace=False, sort_array=True, use_kahan=True),
0.45867514538708193, decimal=15)
assert_almost_equal(
util.logsumexp(-data, inplace=True, sort_array=True, use_kahan=True),
0.45867514538708193, decimal=15)
def test_logsumexp_truncated_diverging_geometric_series():
data = np.ascontiguousarray(np.arange(10000)[::-1].astype(np.float64))
assert_almost_equal(
util.logsumexp(data, inplace=False, sort_array=False, use_kahan=False),
9999.4586751453862, decimal=15)
assert_almost_equal(
util.logsumexp(data, inplace=False, sort_array=False, use_kahan=True),
9999.4586751453862, decimal=15)
assert_almost_equal(
util.logsumexp(data, inplace=False, sort_array=True, use_kahan=False),
9999.4586751453862, decimal=15)
assert_almost_equal(
util.logsumexp(data, inplace=False, sort_array=True, use_kahan=True),
9999.4586751453862, decimal=15)
assert_almost_equal(
util.logsumexp(data, inplace=True, sort_array=True, use_kahan=True),
9999.4586751453862, decimal=15)
def test_logsumexp_pair():
assert_almost_equal(util.logsumexp_pair(0.0, 0.0), np.log(2.0), decimal=15)
assert_almost_equal(util.logsumexp_pair(1.0, 1.0), 1.0 + np.log(2.0), decimal=15)
assert_almost_equal(util.logsumexp_pair(10.0, 10.0), 10.0 + np.log(2.0), decimal=15)
assert_almost_equal(util.logsumexp_pair(100.0, 100.0), 100.0 + np.log(2.0), decimal=15)
assert_almost_equal(util.logsumexp_pair(1000.0, 1000.0), 1000.0 + np.log(2.0), decimal=15)
assert_almost_equal(util.logsumexp_pair(10.0, 0.0), 10.000045398899218, decimal=15)
assert_almost_equal(util.logsumexp_pair(0.0, 10.0), 10.000045398899218, decimal=15)
assert_almost_equal(util.logsumexp_pair(100.0, 0.0), 100.0, decimal=15)
assert_almost_equal(util.logsumexp_pair(0.0, 100.0), 100.0, decimal=15)
assert_almost_equal(util.logsumexp_pair(1000.0, 0.0), 1000.0, decimal=15)
assert_almost_equal(util.logsumexp_pair(0.0, 1000.0), 1000.0, decimal=15)
####################################################################################################
# counting states and transitions
####################################################################################################
def test_break_points_us_like_trajs():
X = 2000
T = 100
for K in range(T):
bp = util.get_therm_state_break_points(np.ones(shape=(X,), dtype=np.intc) * K)
assert_true(bp.shape[0] == 1)
assert_true(bp[0] == 0)
def test_break_points_st_like_trajs():
bp = util.get_therm_state_break_points(np.arange(1000).astype(np.intc))
assert_true(bp.shape[0] == 1000)
assert_array_equal(bp, np.array(range(1000), dtype=np.intc))
bp = util.get_therm_state_break_points(
np.array([0] * 10 + [1] * 20 + [0] * 30 + [1], dtype=np.intc))
assert_true(bp.shape[0] == 4)
assert_array_equal(bp, np.array([0, 10, 30, 60], dtype=np.intc))
def test_count_matrices_single_counts():
dtrajs = [
np.array([0, 0, 1, 1, 2, 2, 0, 2, 1, 0], dtype=np.intc),
np.array([0, 0, 1, 1, 2, 2, 0, 2, 1, 0], dtype=np.intc)]
ttrajs = [np.array([0] * 10, dtype=np.intc), np.array([1] * 10, dtype=np.intc)]
# dtraj = [
# np.array(
# [[0, 0], [0, 0], [0, 1], [0, 1], [0, 2], [0, 2], [0, 0], [0, 2], [0, 1], [0, 0]],
# dtype=np.intc),
# np.array(
# [[1, 0], [1, 0], [1, 1], [1, 1], [1, 2], [1, 2], [1, 0], [1, 2], [1, 1], [1, 0]],
# dtype=np.intc)]
C_K = util.count_matrices(ttrajs, dtrajs, 1, sparse_return=False)
ref = np.ones(shape=(2, 3, 3), dtype=np.intc)
assert_array_equal(C_K, ref)
def test_count_matrices_st_traj():
ttraj = [np.array([0, 0, 1, 1, 1, 0, 0, 2, 2, 2, 0, 0], dtype=np.intc)]
dtraj = [np.array([0, 0, 0, 1, 0, 1, 1, 1, 2, 1, 2, 2], dtype=np.intc)]
C_K = util.count_matrices(ttraj, dtraj, 1, sliding=True, sparse_return=False, nthermo=4, nstates=4)
ref = np.zeros(shape=(4, 4, 4), dtype=np.intc)
ref[0, 0, 0] = 1
ref[0, 1, 1] = 1
ref[0, 2, 2] = 1
ref[1, 0, 1] = 1
ref[1, 1, 0] = 1
ref[2, 1, 2] = 1
ref[2, 2, 1] = 1
assert_array_equal(C_K, ref)
def test_state_counts():
ttrajs = [np.zeros(shape=(10,), dtype=np.intc), 2 * np.ones(shape=(20,), dtype=np.intc)]
dtrajs = [np.zeros(shape=(10,), dtype=np.intc), 2 * np.ones(shape=(20,), dtype=np.intc)]
ref = np.array([[10, 0, 0, 0], [0] * 4, [0, 0, 20, 0], [0] * 4, [0] * 4], dtype=np.intc)
N = util.state_counts(ttrajs, dtrajs, nthermo=5, nstates=4)
assert_array_equal(N, ref)
def test_restriction():
T = 10
M = 100
X = 1000
state_sequence = np.array([[0, i] for i in range(M)] * 10, dtype=np.intc)
bias_energy_sequence = np.ascontiguousarray(
np.array([[i] * T for i in range(X)], dtype=np.float64).transpose())
cset = [i for i in range(M) if i % 2 == 0]
ref_state_sequence = np.array([[0, i] for i in range(int(M / 2))] * 10, dtype=np.intc)
ref_bias_energy_sequence = np.ascontiguousarray(
np.array([[i] * T for i in range(X) if i % 2 == 0], dtype=np.float64).transpose())
new_state_sequence, new_bias_energy_sequence = util.restrict_samples_to_cset(
state_sequence, bias_energy_sequence, cset)
assert_array_equal(new_state_sequence, ref_state_sequence)
assert_array_equal(new_bias_energy_sequence, ref_bias_energy_sequence)
####################################################################################################
# bias calculation tools
####################################################################################################
def test_get_umbrella_bias_binary():
nsamples = 100
nthermo = 2
ndim = 3
traj = np.linspace(0.0, 2.0, nsamples)
for _i in range(1, ndim):
traj = np.vstack((traj, np.linspace(0.0, 2.0, nsamples)))
traj = np.ascontiguousarray(traj.T, dtype=np.float64)
umbrella_centers = np.zeros(shape=(nthermo, ndim), dtype=np.float64)
umbrella_centers[1, :] = 1.0
force_constants = np.array([
np.zeros(shape=(ndim, ndim), dtype=np.float64), np.eye(ndim, dtype=np.float64)])
width = np.zeros(shape=(ndim,), dtype=np.float64)
bias = util.get_umbrella_bias(traj, umbrella_centers, force_constants, width)
ref = np.vstack((
np.zeros(shape=(nsamples)),
0.5 * ndim * np.linspace(-1.0, 1.0, nsamples)**2)).T.astype(np.float64)
assert_almost_equal(bias, ref, decimal=15)
####################################################################################################
# transition matrix renormalization
####################################################################################################
| lgpl-3.0 | -6,054,468,994,515,322,000 | 46.530516 | 103 | 0.568352 | false |
sigmavirus24/pip | tests/functional/test_freeze.py | 1 | 16681 | import sys
import os
import re
import textwrap
import pytest
from doctest import OutputChecker, ELLIPSIS
from tests.lib import _create_test_package, _create_test_package_with_srcdir
distribute_re = re.compile('^distribute==[0-9.]+\n', re.MULTILINE)
def _check_output(result, expected):
checker = OutputChecker()
actual = str(result)
# FIXME! The following is a TOTAL hack. For some reason the
# __str__ result for pkg_resources.Requirement gets downcased on
# Windows. Since INITools is the only package we're installing
# in this file with funky case requirements, I'm forcibly
# upcasing it. You can also normalize everything to lowercase,
# but then you have to remember to upcase <BLANKLINE>. The right
# thing to do in the end is probably to find out how to report
# the proper fully-cased package name in our error message.
if sys.platform == 'win32':
actual = actual.replace('initools', 'INITools')
# This allows our existing tests to work when run in a context
# with distribute installed.
actual = distribute_re.sub('', actual)
def banner(msg):
return '\n========== %s ==========\n' % msg
assert checker.check_output(expected, actual, ELLIPSIS), (
banner('EXPECTED') + expected + banner('ACTUAL') + actual +
banner(6 * '=')
)
def test_freeze_basic(script):
"""
Some tests of freeze, first we have to install some stuff. Note that
the test is a little crude at the end because Python 2.5+ adds egg
info to the standard library, so stuff like wsgiref will show up in
the freezing. (Probably that should be accounted for in pip, but
currently it is not).
"""
script.scratch_path.join("initools-req.txt").write(textwrap.dedent("""\
simple==2.0
# and something else to test out:
simple2<=3.0
"""))
script.pip_install_local(
'-r', script.scratch_path / 'initools-req.txt',
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
...simple==2.0
simple2==3.0...
<BLANKLINE>""")
_check_output(result.stdout, expected)
def test_freeze_with_pip(script):
"""Test pip shows itself"""
result = script.pip('freeze', '--all')
assert 'pip==' in result.stdout
def test_freeze_with_invalid_names(script):
"""
Test that invalid names produce warnings and are passed over gracefully.
"""
def fake_install(pkgname, dest):
egg_info_path = os.path.join(
dest, '{0}-1.0-py{1}.{2}.egg-info'.format(
pkgname.replace('-', '_'),
sys.version_info[0],
sys.version_info[1]
)
)
with open(egg_info_path, 'w') as egg_info_file:
egg_info_file.write(textwrap.dedent("""\
Metadata-Version: 1.0
Name: {0}
Version: 1.0
""".format(pkgname)
))
valid_pkgnames = ('middle-dash', 'middle_underscore', 'middle.dot')
invalid_pkgnames = (
'-leadingdash', '_leadingunderscore', '.leadingdot',
'trailingdash-', 'trailingunderscore_', 'trailingdot.'
)
for pkgname in valid_pkgnames + invalid_pkgnames:
fake_install(pkgname, script.site_packages_path)
result = script.pip('freeze', expect_stderr=True)
for pkgname in valid_pkgnames:
_check_output(
result.stdout,
'...{0}==1.0...'.format(pkgname.replace('_', '-'))
)
for pkgname in invalid_pkgnames:
_check_output(
result.stderr,
'...Could not parse requirement: {0}\n...'.format(
pkgname.replace('_', '-')
)
)
@pytest.mark.svn
def test_freeze_svn(script, tmpdir):
"""Test freezing a svn checkout"""
checkout_path = _create_test_package(script, vcs='svn')
# Install with develop
script.run(
'python', 'setup.py', 'develop',
cwd=checkout_path, expect_stderr=True
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
...-e svn+...#egg=version_pkg
...""")
_check_output(result.stdout, expected)
@pytest.mark.git
@pytest.mark.xfail
def test_freeze_exclude_editable(script, tmpdir):
"""
Test excluding editable from freezing list.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
result = script.pip('freeze', '--exclude-editable', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
@pytest.mark.git
def test_freeze_git_clone(script, tmpdir):
"""
Test freezing a Git clone.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f', '%s#egg=pip_test_package' % repo_dir,
expect_stderr=True,
)
expected = textwrap.dedent(
"""
-f %(repo)s#egg=pip_test_package...
-e git+...#egg=version_pkg
...
""" % {'repo': repo_dir},
).strip()
_check_output(result.stdout, expected)
# Check that slashes in branch or tag names are translated.
# See also issue #1083: https://github.com/pypa/pip/issues/1083
script.run(
'git', 'checkout', '-b', 'branch/name/with/slash',
cwd=repo_dir,
expect_stderr=True,
)
# Create a new commit to ensure that the commit has only one branch
# or tag name associated to it (to avoid the non-determinism reported
# in issue #1867).
script.run('touch', 'newfile', cwd=repo_dir)
script.run('git', 'add', 'newfile', cwd=repo_dir)
script.run('git', 'commit', '-m', '...', cwd=repo_dir)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e ...@...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
@pytest.mark.git
def test_freeze_git_clone_srcdir(script, tmpdir):
"""
Test freezing a Git clone where setup.py is in a subdirectory
relative the repo root and the source code is in a subdirectory
relative to setup.py.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package_with_srcdir(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir / 'subdir',
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+...#egg=version_pkg&subdirectory=subdir
...
"""
).strip()
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f', '%s#egg=pip_test_package' % repo_dir,
expect_stderr=True,
)
expected = textwrap.dedent(
"""
-f %(repo)s#egg=pip_test_package...
-e git+...#egg=version_pkg&subdirectory=subdir
...
""" % {'repo': repo_dir},
).strip()
_check_output(result.stdout, expected)
@pytest.mark.git
def test_freeze_git_remote(script, tmpdir):
"""
Test freezing a Git clone.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script)
result = script.run(
'git', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
origin_remote = pkg_version
other_remote = pkg_version + '-other'
# check frozen remote after clone
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+{remote}@...#egg=version_pkg
...
"""
).format(remote=origin_remote).strip()
_check_output(result.stdout, expected)
# check frozen remote when there is no remote named origin
script.run('git', 'remote', 'remove', 'origin', cwd=repo_dir)
script.run('git', 'remote', 'add', 'other', other_remote, cwd=repo_dir)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+{remote}@...#egg=version_pkg
...
"""
).format(remote=other_remote).strip()
_check_output(result.stdout, expected)
# when there are more than one origin, priority is given to the
# remote named origin
script.run('git', 'remote', 'add', 'origin', origin_remote, cwd=repo_dir)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e git+{remote}@...#egg=version_pkg
...
"""
).format(remote=origin_remote).strip()
_check_output(result.stdout, expected)
@pytest.mark.mercurial
def test_freeze_mercurial_clone(script, tmpdir):
"""
Test freezing a Mercurial clone.
"""
# Returns path to a generated package called "version_pkg"
pkg_version = _create_test_package(script, vcs='hg')
result = script.run(
'hg', 'clone', pkg_version, 'pip-test-package',
expect_stderr=True,
)
repo_dir = script.scratch_path / 'pip-test-package'
result = script.run(
'python', 'setup.py', 'develop',
cwd=repo_dir,
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent(
"""
...-e hg+...#egg=version_pkg
...
"""
).strip()
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f', '%s#egg=pip_test_package' % repo_dir,
expect_stderr=True,
)
expected = textwrap.dedent(
"""
-f %(repo)s#egg=pip_test_package...
...-e hg+...#egg=version_pkg
...
""" % {'repo': repo_dir},
).strip()
_check_output(result.stdout, expected)
@pytest.mark.bzr
def test_freeze_bazaar_clone(script, tmpdir):
"""
Test freezing a Bazaar clone.
"""
try:
checkout_path = _create_test_package(script, vcs='bazaar')
except OSError as e:
pytest.fail('Invoking `bzr` failed: %s' % e)
result = script.run(
'bzr', 'checkout', checkout_path, 'bzr-package'
)
result = script.run(
'python', 'setup.py', 'develop',
cwd=script.scratch_path / 'bzr-package',
expect_stderr=True,
)
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
...-e bzr+file://...@1#egg=version_pkg
...""")
_check_output(result.stdout, expected)
result = script.pip(
'freeze', '-f',
'%s/#egg=django-wikiapp' % checkout_path,
expect_stderr=True,
)
expected = textwrap.dedent("""\
-f %(repo)s/#egg=django-wikiapp
...-e bzr+file://...@...#egg=version_pkg
...""" % {'repo': checkout_path})
_check_output(result.stdout, expected)
def test_freeze_with_local_option(script):
"""
Test that wsgiref (from global site-packages) is reported normally, but not
with --local.
"""
result = script.pip_install_local('initools==0.2')
result = script.pip('freeze', expect_stderr=True)
expected = textwrap.dedent("""\
INITools==0.2
wsgiref==...
<BLANKLINE>""")
# The following check is broken (see
# http://bitbucket.org/ianb/pip/issue/110). For now we are simply
# neutering this test, but if we can't find a way to fix it,
# this whole function should be removed.
# _check_output(result, expected)
result = script.pip('freeze', '--local', expect_stderr=True)
expected = textwrap.dedent("""\
INITools==0.2
<BLANKLINE>""")
_check_output(result.stdout, expected)
# used by the test_freeze_with_requirement_* tests below
_freeze_req_opts = textwrap.dedent("""\
# Unchanged requirements below this line
-r ignore.txt
--requirement ignore.txt
-Z ignore
--always-unzip ignore
-f http://ignore
-i http://ignore
--pre
--trusted-host url
--process-dependency-links
--extra-index-url http://ignore
--find-links http://ignore
--index-url http://ignore
""")
def test_freeze_with_requirement_option(script):
"""
Test that new requirements are created correctly with --requirement hints
"""
script.scratch_path.join("hint.txt").write(textwrap.dedent("""\
INITools==0.1
NoExist==4.2 # A comment that ensures end of line comments work.
simple==3.0; python_version > '1.0'
""") + _freeze_req_opts)
result = script.pip_install_local('initools==0.2')
result = script.pip_install_local('simple')
result = script.pip(
'freeze', '--requirement', 'hint.txt',
expect_stderr=True,
)
expected = textwrap.dedent("""\
INITools==0.2
simple==3.0
""")
expected += _freeze_req_opts
expected += "## The following requirements were added by pip freeze:..."
_check_output(result.stdout, expected)
assert (
"Requirement file [hint.txt] contains NoExist==4.2, but that package "
"is not installed"
) in result.stderr
def test_freeze_with_requirement_option_multiple(script):
"""
Test that new requirements are created correctly with multiple
--requirement hints
"""
script.scratch_path.join('hint1.txt').write(textwrap.dedent("""\
INITools==0.1
NoExist==4.2
simple==3.0; python_version > '1.0'
""") + _freeze_req_opts)
script.scratch_path.join('hint2.txt').write(textwrap.dedent("""\
NoExist2==2.0
simple2==1.0
""") + _freeze_req_opts)
result = script.pip_install_local('initools==0.2')
result = script.pip_install_local('simple')
result = script.pip_install_local('simple2==1.0')
result = script.pip_install_local('meta')
result = script.pip(
'freeze', '--requirement', 'hint1.txt', '--requirement', 'hint2.txt',
expect_stderr=True,
)
expected = textwrap.dedent("""\
INITools==0.2
simple==1.0
""")
expected += _freeze_req_opts
expected += textwrap.dedent("""\
simple2==1.0
""")
expected += "## The following requirements were added by pip freeze:"
expected += os.linesep + textwrap.dedent("""\
...meta==1.0...
""")
_check_output(result.stdout, expected)
assert (
"Requirement file [hint1.txt] contains NoExist==4.2, but that "
"package is not installed"
) in result.stderr
assert (
"Requirement file [hint2.txt] contains NoExist2==2.0, but that "
"package is not installed"
) in result.stderr
# any options like '--index-url http://ignore' should only be emitted once
# even if they are listed in multiple requirements files
assert result.stdout.count("--index-url http://ignore") == 1
def test_freeze_user(script, virtualenv):
"""
Testing freeze with --user, first we have to install some stuff.
"""
virtualenv.system_site_packages = True
script.pip_install_local('--user', 'simple==2.0')
script.pip_install_local('simple2==3.0')
result = script.pip('freeze', '--user', expect_stderr=True)
expected = textwrap.dedent("""\
simple==2.0
<BLANKLINE>""")
_check_output(result.stdout, expected)
assert 'simple2' not in result.stdout
| mit | -4,954,706,562,733,606,000 | 30.121269 | 79 | 0.591032 | false |
provideyourown/SiteMonitoring | memoryusage.py | 1 | 1362 | #!/usr/bin/env python
"""
Display the system memory usage. Works against a remote server, or pass 'local' or 'localhost' for this computer
Usage:
./memoryusage.py MYSERVER
"""
import argparse
import subprocess
def getMemoryUsage(server):
"""
    Returns memory usage as (used_mb, total_mb, swap_used_fraction); swap_used_fraction is in the interval [0.0, 1.0]
"""
if server in ['local', 'localhost']:
result = subprocess.check_output('free -m', shell=True)
else:
result = subprocess.check_output('ssh %s "free -m"' % server, shell=True)
lines = result.split('\n')
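    # The parsing below assumes the older procps "free -m" output layout (an
    # assumption, not checked here):
    #                total   used   free ...
    #   Mem:          ...
    #   -/+ buffers/cache:  <used>  <free>      <- lines[2]: toks[2], toks[3]
    #   Swap:        <total> <used>  ...        <- lines[3]: toks[1], toks[2]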
toks = lines[2].split() # split along whitespace
used = int(toks[2])
free = int(toks[3])
total = used + free
toks = lines[3].split()
swap = float(toks[2]) / float(toks[1]) if int(toks[1]) else 0
return used, total, swap
if __name__ == '__main__': # allow funcs above to be imported as a module
parser = argparse.ArgumentParser(description='Get memory usage for a server/computer.')
parser.add_argument("server", help='Enter server name as defined in ~/.ssh/config or user@ip. NB: public key should be uploaded to server. For local computer use either local or localhost')
args = parser.parse_args()
used, total, swap = getMemoryUsage(args.server)
print "Memory usage: {:.2f}% of {}Mb (swap: {:.2f}%)".format(100.0*used/total, total, swap*100)
exit()
| gpl-3.0 | 2,848,168,859,462,388,700 | 31.428571 | 193 | 0.654185 | false |
Naeka/vosae-app | www/invoicing/models/payment.py | 1 | 3144 | # -*- coding:Utf-8 -*-
from mongoengine import Document, fields
from django.utils.timezone import now
import decimal
from core.fields import DateField
from invoicing import PAYMENT_TYPES, currency_format
from invoicing.exceptions import (
InvalidPaymentAmount,
)
__all__ = (
'Payment',
'InvoicePayment',
'DownPaymentInvoicePayment',
)
class Payment(Document):
"""
A payment, representing money flows within the company.
Amount can be negative (debit) or positive (credit).
"""
TYPES = PAYMENT_TYPES
tenant = fields.ReferenceField("Tenant", required=True)
issuer = fields.ReferenceField("VosaeUser", required=True)
issued_at = fields.DateTimeField(required=True, default=now)
amount = fields.DecimalField(required=True)
currency = fields.ReferenceField("Currency", required=True)
type = fields.StringField(required=True, choices=TYPES, default="CHECK")
date = DateField(required=True)
note = fields.StringField(max_length=512)
meta = {
"allow_inheritance": True
}
def __unicode__(self):
if self.date and self.amount and self.currency:
return u'%s: %s' % (self.date, currency_format(self.amount, self.currency.symbol, True))
return '%s object' % self.__class__.__name__
@classmethod
def pre_save(self, sender, document, **kwargs):
"""
Pre save hook handler
Validates payment amount
"""
# If amount set from float (not from string), the rounding is only done on init or on save
# So, we round here to prevent incorrect comparison
document.amount = document.amount.quantize(decimal.Decimal('.00'), decimal.ROUND_HALF_UP)
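        # Example of the rounding (illustrative values): Decimal('10.005') ->
        # Decimal('10.01') and Decimal('10.004') -> Decimal('10.00') under
        # ROUND_HALF_UP, so the comparison below is done on two-decimal amounts.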
if document.amount < 0 or document.amount > document.related_to.balance:
raise InvalidPaymentAmount()
@classmethod
def post_save(self, sender, document, created, **kwargs):
"""
Post save hook handler
- Associates payment to related document
- Creates a payment statistic entry
"""
from vosae_statistics.models import PaymentStatistics
if created:
document.related_to.payments.append(document)
document.related_to.save()
# XXX: Should save organization/contact/address
payment_statistic = PaymentStatistics(
tenant=document.tenant,
date=document.date,
amount=document.amount,
payment=document
).save()
@classmethod
def post_delete(self, sender, document, **kwargs):
"""
Post delete hook handler
Removes payment from related document
"""
document.related_to.payments.pop(document)
document.related_to.save()
class InvoicePayment(Payment):
"""Payment related to an :class:`~invoicing.models.Invoice`"""
related_to = fields.ReferenceField("Invoice", required=True, dbref=False)
class DownPaymentInvoicePayment(Payment):
"""Payment related to an :class:`~invoicing.models.DownPaymentInvoice`"""
related_to = fields.ReferenceField("DownPaymentInvoice", required=True, dbref=False)
| agpl-3.0 | -8,912,325,496,183,845,000 | 29.823529 | 100 | 0.66285 | false |
BTCfork/hardfork_prototype_1_mvf-core | qa/rpc-tests/mvf-core-csig.py | 1 | 9119 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2016 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# MVF-Core
"""
Exercise the signature change (replay protection) code.
Derived from walletbackupauto.py.
Test case is:
4 nodes - 2 forking and 2 non-forking, sending transactions between each other.
Prior to the fork, anything goes.
Post fork, the nodes of the same kind can still send between each other,
but not to the nodes of the other kind (2 way check).
"""
import os
import fnmatch
import hashlib
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from random import randint
import logging
import time
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
FORKHEIGHT = 120
class ReplayProtectionTest(BitcoinTestFramework):
def setup_chain(self):
#logging.info("Initializing test directory "+self.options.tmpdir)
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
#logging.info("Starting nodes")
print("Starting nodes")
# all nodes are spenders, let's give them a keypool=100
self.extra_args = [
['-debug', '-whitelist=127.0.0.1', "-keypool=100"],
['-debug', '-whitelist=127.0.0.1', "-keypool=100"],
['-debug', '-whitelist=127.0.0.1', "-keypool=100", "-forkheight=%s"%FORKHEIGHT],
['-debug', '-whitelist=127.0.0.1', "-keypool=100", "-forkheight=%s"%FORKHEIGHT]]
self.nodes = start_nodes(4, self.options.tmpdir, self.extra_args)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[3], 2)
self.is_network_split=False
self.sync_all()
def send_and_check(self, from_node, to_node, expect_to_succeed=True, force_sync=True, check=True, check_for_fail=False):
''' try sending 0.1 BTC from one node to another,
and optionally check if successful '''
to_addr = self.nodes[to_node].getnewaddress()
amount = Decimal(1) / Decimal(10)
txid = self.nodes[from_node].sendtoaddress(to_addr, amount)
if force_sync:
sync_mempools([self.nodes[from_node], self.nodes[to_node]])
else:
time.sleep(1)
if check:
if check_for_fail:
assert_equal(txid in self.nodes[from_node].getrawmempool(), True)
assert_equal(txid in self.nodes[to_node].getrawmempool(), False)
else:
assert_equal(txid in self.nodes[from_node].getrawmempool() and (txid in self.nodes[to_node].getrawmempool() or not expect_to_succeed), True)
return txid
def run_test(self):
#logging.info("Fork height configured for block %s"%(FORKHEIGHT))
print("Fork height configured for block %s"%(FORKHEIGHT))
#logging.info("Generating initial 104 blocks")
print("Generating initial 104 blocks")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(101)
sync_blocks(self.nodes)
#logging.info("Current height %s blocks"%(self.nodes[0].getblockcount()))
print("Current height %s blocks"%(self.nodes[0].getblockcount()))
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 50)
assert_equal(self.nodes[0].getblockcount(), 104)
#logging.info("Check all sending works after setup")
print("Check all sending works after setup")
# from any node to the others should be ok now
# this should generate 4*3 = 12 more blocks
for src_node in range(4):
for dst_node in range(4):
if src_node != dst_node:
#logging.info("... from %d to %d" %(src_node, dst_node))
print("... from %d to %d" %(src_node, dst_node))
self.send_and_check(src_node, dst_node, True)
self.nodes[dst_node].generate(1)
sync_blocks(self.nodes)
current_height = self.nodes[0].getblockcount()
assert_equal(current_height, 116)
# generate blocks, one on each node in turn, until we reach pre-fork block height
blocks_to_fork = FORKHEIGHT - current_height - 1
self.nodes[0].generate(blocks_to_fork)
# not sure why this loop didn't work reliably...
# maybe it was the round-robin generation
while False: #blocks_to_fork > 0:
#logging.info("blocks left to fork height: %d" % blocks_to_fork)
print("blocks left to fork height: %d" % blocks_to_fork)
self.nodes[blocks_to_fork % 4].generate(1)
blocks_to_fork -= 1
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT - 1)
#logging.info("Current height %s blocks (pre-fork block)"%(self.nodes[0].getblockcount()))
print("Current height %s blocks (pre-fork block)"%(self.nodes[0].getblockcount()))
# check that we can still send to all other nodes for the pre-fork block
# collect a bunch of tx's sent by the nodes to each other
#logging.info("sending tx's between all nodes at pre-fork")
print("sending tx's between all nodes at pre-fork")
should_be_fine_txs = []
for src_node in range(4):
for dst_node in range(4):
if src_node != dst_node:
#logging.info("... from %d to %d" %(src_node, dst_node))
print("... from %d to %d" %(src_node, dst_node))
should_be_fine_txs.append(self.send_and_check(src_node, dst_node, True))
#logging.info("Verifying tx's were still accepted by all nodes")
print("Verifying tx's were still accepted by all nodes")
sync_mempools(self.nodes)
mempools = [self.nodes[i].getrawmempool() for i in range(4)]
for tx in should_be_fine_txs:
for n in range(4):
assert_equal(tx in mempools[n], True)
# generate the fork block
#logging.info("Generate fork block at height %s" % FORKHEIGHT)
print("Generate fork block at height %s" % FORKHEIGHT)
self.nodes[0].generate(1)
# check the previous round of tx's not in mempool anymore
self.sync_all()
assert_equal(self.nodes[0].getblockcount(), FORKHEIGHT)
#logging.info("Verifying tx's no longer in any mempool")
print("Verifying tx's no longer in any mempool")
mempools = [self.nodes[i].getrawmempool() for i in range(4)]
for tx in should_be_fine_txs:
for n in range(4):
assert_equal(tx in mempools[n], False)
# check that now, only nodes of the same kind can transact
# these pairs should work fine
#logging.info("Checking transactions between same-kind nodes")
print("Checking transactions between same-kind nodes")
for pair in ((0,1), (1,0), (2,3), (3,2)):
#logging.info("... from %d to %d" %(pair[0], pair[1]))
print("... from %d to %d" %(pair[0], pair[1]))
self.send_and_check(pair[0], pair[1], True)
# re-connect the nodes which have been disconnected due to the
# above post-fork transactions, so we can test them separately
#logging.info("Re-connecting nodes which disconnected due to prior step")
print("Re-connecting nodes which disconnected due to prior step")
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,1,3)
#logging.info("Checking transactions between forked/unforked nodes")
print("Checking transactions between forked/unforked nodes")
# these should not work anymore
# MVF-Core TODO: decide whether to accept old-style signatures post-fork (maybe limited-time only?)
# if you only want to deny new->old, then use the commented out code
#for pair in ((2,0), (2,1), (3,0), (3,1)):
# check both forked->unforked and vice versa are blocked now
for pair in ((0,2), (0,3), (1,2), (1,3), (2,0), (2,1), (3,0), (3,1)):
#logging.info("... from %d to %d" %(pair[0], pair[1]))
print("... from %d to %d" %(pair[0], pair[1]))
self.send_and_check(pair[0], pair[1], expect_to_succeed=False, force_sync=False, check=True, check_for_fail=True)
if __name__ == '__main__':
ReplayProtectionTest().main()
| mit | -2,729,698,025,060,871,000 | 43.26699 | 156 | 0.617173 | false |
DBeath/flask-feedrsub | feedrsub/utils/feeds/feed_generation.py | 1 | 1207 | from flask import current_app as app
from typing import List, Dict
from flask import url_for
def websub_discovery_link() -> Dict:
"""
Creates a WebSub discovery link
:return: link as dict
"""
hub_enabled = app.config.get("HUB_ENABLED", False)
if hub_enabled:
hub_url = url_for(
"websub.hub_endpoint",
_external=True,
_scheme=app.config.get("HTTP_SCHEME", "http"),
)
return dict(href=hub_url, rel="hub")
return dict()
def links(links: List[Dict] = None) -> List:
"""
Creates a list of links to add to the feed
:param links: List of Dicts with href and rel keys
:return: list of links as dicts
"""
if not links:
links = []
links_list = []
websub_link = websub_discovery_link()
if websub_link:
links.append(websub_link)
for item in links:
if "href" in item and "rel" in item:
links_list.append(item)
return links_list
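# Illustrative call (hypothetical feed URL; assumes HUB_ENABLED is True so
# websub_discovery_link() appends a hub entry):
#   links([{'href': 'https://example.com/feed.xml', 'rel': 'self'}])
#   -> [{'href': 'https://example.com/feed.xml', 'rel': 'self'},
#       {'href': '<hub endpoint URL>', 'rel': 'hub'}]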
def generator():
"""
Returns the generator of the feed
:return: tuple of generator name, location, version
"""
return (app.config["PROJECT_NAME"], app.config.get("SERVER_NAME", None), "1.0")
| mit | -544,283,231,300,035,600 | 22.211538 | 83 | 0.59652 | false |
JonathanFrederick/job-hunt | company_scripts.py | 1 | 1197 | from selenium import webdriver
from companies.red_hat import red_hat
from app import db
from models import Company
def print_result(info):
"""Takes in a dictionary with keys for 'company', 'title', 'url',
and 'description' and prints them neatly to the terminal"""
for key in ['company', 'title', 'url', 'description']:
assert key in info.keys(), \
"The key '{}' is not in the dictionary".format(key)
assert isinstance(info[key], str), \
"The value at '{}' is not a string".format(key)
print('{} - {}'.format(info['company'], info['title']))
print(info['url'])
print(info['description'])
def main():
driver = webdriver.Firefox()
company_dict = {
"Red Hat": red_hat,
}
interesting_companies = db.session.query(Company) \
.filter(Company.interest == True)
for comp in interesting_companies:
company_dict[comp.name](driver)
driver.close()
# print_result({'company': 'comp',
# 'title': 'title',
# 'url': 'url.com',
# 'description': 'things and stuff'})
if __name__ == "__main__":
main()
| mit | -6,624,179,340,990,161,000 | 28.195122 | 71 | 0.56391 | false |
alexismirandan/Edit-image-kivy-app | layout/edit_image_layout.py | 1 | 2428 | # -*- coding: utf-8 -*
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ListProperty, ObjectProperty
from components.touch_selector import TouchSelector
from components.bubble_buttons import BubbleButtons
from layout.image_layout import ImageLayout
from kivy.uix.button import Button
class EditImageLayout(FloatLayout):
color_button = ListProperty([1, .3, .4, 1])
button_color = ListProperty([0, 0, 0, 1])
rectangle_selector = ObjectProperty()
text_size_rectangle = ObjectProperty()
image_layout = ObjectProperty()
bubble_buttons = ObjectProperty()
bubble_buttons_undo_confirm = ObjectProperty()
def __init__(self, **kwargs):
self.sm = kwargs.pop('sm', None)
self.crop_image_screen = kwargs.pop('crop_image_screen', None)
super(EditImageLayout, self).__init__(**kwargs)
self.rectangle_selector.bind(size_selected=self.on_change_size_rectangle_selector)
self.rectangle_selector.bind(size_selected_temp=self.update_text_size_rectangle)
self.bind(on_touch_down=self.bubble_buttons.hide)
self.bubble_buttons.resize_button.bind(on_press=self.on_press_resize_button)
self.bubble_buttons_undo_confirm.undo_button.bind(on_press=self.on_press_undo_button)
self.bubble_buttons_undo_confirm.confirm_button.bind(on_press=self.on_press_confirm_button)
def on_change_size_rectangle_selector(self, instance, size_selected):
if not self.rectangle_selector.tap_not_draw_a_line():
self.bubble_buttons.show()
else:
self.text_size_rectangle.text = ''
def on_press_resize_button(self, instance):
self.image_layout.resize_image(width=self.rectangle_selector.size_selected[0],
height=self.rectangle_selector.size_selected[1])
self.rectangle_selector.delete_line()
self.text_size_rectangle.text = ''
self.bubble_buttons_undo_confirm.show()
def on_press_undo_button(self, instance):
size = self.image_layout.old_size
self.image_layout.resize_image(width=size[0], height=size[1])
self.bubble_buttons_undo_confirm.hide()
def on_press_confirm_button(self, instance):
self.bubble_buttons_undo_confirm.hide()
def update_text_size_rectangle(self, instance, size):
self.text_size_rectangle.text = str('({0}, {1})'.format(int(size[0]), int(size[1])))
| mit | 7,713,281,658,232,743,000 | 43.145455 | 99 | 0.689044 | false |
botswana-harvard/bcpp-export | bcpp_export/old_export/constants.py | 1 | 1213 | import numpy as np
from edc_constants.constants import (
ALIVE as edc_ALIVE, DEAD as edc_DEAD, YES as edc_YES, NO as edc_NO,
POS as edc_POS, NEG as edc_NEG, IND as edc_IND, UNK as edc_UNK,
NOT_APPLICABLE as edc_NOT_APPLICABLE,
MALE as edc_MALE, FEMALE as edc_FEMALE)
SUBJECT_IDENTIFIER = 'subject_identifier'
HOUSEHOLD_MEMBER = 'household_member'
edc_DWTA = 'DWTA'
edc_NOT_SURE = 'Not Sure'
edc_ART_PRESCRIPTION = 'ART Prescription'
ALIVE = 1
DEAD = 0
DEFAULTER = 2
DWTA = 4
FEMALE = 2
IND = 2
MALE = 1
NAIVE = 1
NEG = 0
NO = 0
NOT_APPLICABLE = 3
NOT_SURE = 5
ON_ART = 3
PLOT_IDENTIFIER = 'plot_identifier'
POS = 1
UNK = 3
YES = 1
gender = {
edc_MALE: MALE,
edc_FEMALE: FEMALE}
hiv_options = {
edc_POS: POS,
edc_NEG: NEG,
edc_IND: IND,
edc_UNK: UNK,
'not_answering': DWTA,
'positive': POS,
'negative': NEG,
'not_sure': UNK,
None: np.nan}
tf = {
True: YES,
False: NO,
None: np.nan}
yes_no = {
edc_YES: YES,
edc_NO: NO,
'1': YES,
'0': NO,
edc_NOT_APPLICABLE: NOT_APPLICABLE,
None: np.nan,
edc_DWTA: DWTA,
edc_NOT_SURE: NOT_SURE}
survival = {
edc_ALIVE: ALIVE,
edc_DEAD: DEAD,
None: np.nan}
| gpl-2.0 | -9,128,420,541,188,606,000 | 17.104478 | 71 | 0.611707 | false |
NLeSC/PattyAnalytics | tests/test_utils.py | 1 | 2201 | import os
from tempfile import NamedTemporaryFile
import pcl
import numpy as np
from patty import utils
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_equal, assert_raises
def _compare( pcA, pcB ):
''' compare two pointclouds point-by-point'''
pcA_arr = np.asarray(pcA)
pcB_arr = np.asarray(pcB)
# dont use set_srs function, they will be tested later
if hasattr(pcA, 'offset' ):
pcA_arr += pcA.offset
if hasattr(pcB, 'offset' ):
pcB_arr += pcB.offset
assert_array_almost_equal(pcA_arr, pcB_arr, 2,
"Written/read point clouds are different!")
def test_read_write():
''' Test read and write LAS files functionality'''
filename = './testIO.las'
# make and save a pointcloud
pc1 = pcl.PointCloud(10)
pc1_arr = np.asarray(pc1)
pc1_arr[:] = np.random.randn(*pc1_arr.shape)
utils.save(pc1, filename)
# reload it
pc2 = utils.load(filename)
_compare( pc1, pc2 )
os.remove(filename)
def test_auto_file_format():
"""Test saving and loading pointclouds via the pcl loader"""
# make and save a pointcloud
pc = pcl.PointCloud(10)
pc_arr = np.asarray(pc)
pc_arr[:] = np.random.randn(*pc_arr.shape)
with NamedTemporaryFile(suffix='.ply') as f:
utils.save(pc, f.name)
pc2 = utils.load(f.name)
_compare( pc, pc2 )
with NamedTemporaryFile(suffix='.pcd') as f:
utils.save(pc, f.name)
pc2 = utils.load(f.name)
_compare( pc, pc2 )
with NamedTemporaryFile(suffix='.las') as f:
utils.save(pc, f.name, format="PLY")
pc2 = utils.load(f.name, format="PLY")
_compare( pc, pc2 )
with NamedTemporaryFile(suffix='.las') as f:
utils.save(pc, f.name, format="PCD")
pc2 = utils.load(f.name, format="PCD")
_compare( pc, pc2 )
def test_downsample_random():
pc = pcl.PointCloud(10)
a = np.asarray(pc)
a[:] = np.random.randn(*a.shape)
assert_raises(ValueError, utils.downsample_random, pc, 0)
assert_raises(ValueError, utils.downsample_random, pc, 2)
assert_equal(len(utils.downsample_random(pc, .39)), 4)
| apache-2.0 | -6,275,963,288,678,196,000 | 25.518072 | 73 | 0.625625 | false |
Shihta/python-novaclient | novaclient/tests/v1_1/test_hypervisors.py | 1 | 5832 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient.tests.fixture_data import client
from novaclient.tests.fixture_data import hypervisors as data
from novaclient.tests import utils
class HypervisorsTest(utils.FixturedTestCase):
client_fixture_class = client.V1
data_fixture_class = data.V1
def compare_to_expected(self, expected, hyper):
for key, value in expected.items():
self.assertEqual(getattr(hyper, key), value)
def test_hypervisor_index(self):
expected = [
dict(id=1234, hypervisor_hostname='hyper1'),
dict(id=5678, hypervisor_hostname='hyper2'),
]
result = self.cs.hypervisors.list(False)
self.assert_called('GET', '/os-hypervisors')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_hypervisor_detail(self):
expected = [
dict(id=1234,
service=dict(id=1, host='compute1'),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100),
dict(id=2,
service=dict(id=2, host="compute2"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)]
result = self.cs.hypervisors.list()
self.assert_called('GET', '/os-hypervisors/detail')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_hypervisor_search(self):
expected = [
dict(id=1234, hypervisor_hostname='hyper1'),
dict(id=5678, hypervisor_hostname='hyper2'),
]
result = self.cs.hypervisors.search('hyper')
self.assert_called('GET', '/os-hypervisors/hyper/search')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_hypervisor_servers(self):
expected = [
dict(id=1234,
hypervisor_hostname='hyper1',
servers=[
dict(name='inst1', uuid='uuid1'),
dict(name='inst2', uuid='uuid2')]),
dict(id=5678,
hypervisor_hostname='hyper2',
servers=[
dict(name='inst3', uuid='uuid3'),
dict(name='inst4', uuid='uuid4')]),
]
result = self.cs.hypervisors.search('hyper', True)
self.assert_called('GET', '/os-hypervisors/hyper/servers')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_hypervisor_get(self):
expected = dict(
id=1234,
service=dict(id=1, host='compute1'),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)
result = self.cs.hypervisors.get(1234)
self.assert_called('GET', '/os-hypervisors/1234')
self.compare_to_expected(expected, result)
def test_hypervisor_uptime(self):
expected = dict(
id=1234,
hypervisor_hostname="hyper1",
uptime="fake uptime")
result = self.cs.hypervisors.uptime(1234)
self.assert_called('GET', '/os-hypervisors/1234/uptime')
self.compare_to_expected(expected, result)
def test_hypervisor_statistics(self):
expected = dict(
count=2,
vcpus=8,
memory_mb=20 * 1024,
local_gb=500,
vcpus_used=4,
memory_mb_used=10 * 1024,
local_gb_used=250,
free_ram_mb=10 * 1024,
free_disk_gb=250,
current_workload=4,
running_vms=4,
disk_available_least=200,
)
result = self.cs.hypervisors.statistics()
self.assert_called('GET', '/os-hypervisors/statistics')
self.compare_to_expected(expected, result)
| apache-2.0 | -1,797,045,827,420,671,200 | 32.906977 | 78 | 0.54321 | false |
Spotipo/spotipo | tests/core/test_guestutils.py | 1 | 10985 | import sys
import pytest
from flask import current_app,url_for
from flask_wtf import Form
from wtforms import TextField
from faker import Faker
import arrow
import uuid
from unifispot.core.models import Wifisite,Device,Guesttrack,Guest,Loginauth,\
Guestsession
from unifispot.core.guestutils import init_track,validate_track,redirect_guest,\
assign_guest_entry,validate_loginauth_usage
from tests.helpers import randomMAC,get_guestauth_url
fake = Faker()
def test_init_track(session):
#
site1 = Wifisite.query.get(1)
apmac = randomMAC()
mac = randomMAC()
#test creating a new track
track = init_track(site1,guestmac=mac,apmac=apmac)
count = Guesttrack.query.count()
assert 1 == count,'Guesttrack count is :%s instead of expected 1 '%count
#another track for same MAC done immediately shouldn't create track
track = init_track(site1,guestmac=mac,apmac=apmac)
count = Guesttrack.query.count()
assert 1 == count,'Guesttrack count is :%s instead of expected 1 '%count
assert isinstance(track,Guesttrack),'init_track is not returning Guestrack instance'
#different MAC
track = init_track(site1,guestmac=randomMAC(),apmac=apmac)
count = Guesttrack.query.count()
assert 2 == count,'Guesttrack count is :%s instead of expected 2 '%count
#same MAC after track expiry
track = Guesttrack.query.get(1)
track.timestamp = arrow.utcnow().replace(seconds= -(current_app.config['GUESTTRACK_LIFETIME'] + 100)).naive
session.commit()
track = init_track(site1,guestmac=mac,apmac=apmac)
count = Guesttrack.query.count()
assert 3 == count,'Guesttrack count is :%s instead of expected 3 '%count
#check device count
dcount = Device.query.count()
assert 2 == dcount,'Device count is :%s instead of expected 2 '%count
def test_validate_track(session,client,register_testvalidateview):
    # needs a fixture defined in conftest, as it's a decorator
trackid = str(uuid.uuid4())
mac = randomMAC()
#invalid track ID
status = client.get('/validate_track/%s'%trackid).status
assert '404 NOT FOUND' == status,'Status is :%s instead of 404 for invalid \
trackid'%status
#valid track but non-valid site
guesttrack = Guesttrack(trackid=trackid,devicemac=mac)
session.add(guesttrack)
session.commit()
status = client.get('/validate_track/%s'%trackid).status
assert '404 NOT FOUND' == status,'Status is :%s instead of 404 for invalid \
site'%status
#valid site but no device
site1 = Wifisite.query.get(1)
guesttrack.siteid = site1.id
    session.commit()
status = client.get('/validate_track/%s'%trackid).status
assert '404 NOT FOUND' == status,'Status is :%s instead of 404 for invalid \
device'%status
device = Device(devicemac=mac,siteid=site1.id)
session.add(device)
session.commit()
status = client.get('/validate_track/%s'%trackid).status
assert '200 OK' == status,'Status is :%s instead of 200 OK for valid \
track'%status
def test_redirect_guest(client,session):
site1 = Wifisite.query.get(1)
track = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
#nologin methods
with current_app.test_request_context():
resp = redirect_guest(site1,track)
url = get_guestauth_url(site1,track.trackid)
assert url == resp.location, 'Guest in no auth site is getting redirected to :%s instead of :%s'%\
(resp.location,url)
def test_assign_guest_entry(client,session):
#create dummy email and phone forms
class DummyForm1(Form):
email = TextField('Email')
firstname = TextField('Firstname')
extra1 = TextField('Extra1')
extra2 = TextField('Extra2')
class DummyForm2(Form):
phonenumber = TextField('Email')
firstname = TextField('Firstname')
extra1 = TextField('Extra1')
class DummyFBProfile():
first_name = None
last_name = None
email = None
gender = None
birthday = None
age_range = None
eform = DummyForm1()
eform.email.data = '[email protected]'
eform.firstname.data = 'firstname'
eform.extra1.data = 'extra1'
eform.extra2.data = 'extra2'
pform = DummyForm2()
pform.phonenumber.data = '+1234567890'
pform.firstname.data = 'firstname'
pform.extra1.data = 'extra1'
profile = {
'first_name': 'first_name',
'last_name':'last_name',
'email': '[email protected]',
'age_range': { 'min': 21, 'max':28} }
site1 = Wifisite.query.get(1)
#test creating a new track
##-----test email form
track1 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
track2 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
guest1 = assign_guest_entry(site1,track1,form=eform)
guest1 = assign_guest_entry(site1,track2,form=eform)
cnt = Guest.query.count()
assert 1 == cnt, 'number of guest created is not 1 but :%s '%cnt
newguest = Guest.query.get(1)
assert newguest.details == {'Extra1':'extra1','Extra2':'extra2'}, 'Guest details is :%s insteads \
of expected :%s'%(newguest.details,{'Extra1':'extra1','Extra2':'extra2'})
assert newguest.siteid == site1.id, "Guest siteid is not correctly populated"
assert 1 == Guesttrack.query.get(1).loginstat.get('newguest'),\
'newguest is not set to 1 after new guest added'
assert None == Guesttrack.query.get(2).loginstat.get('newguest'),\
'newguest is not set to None after existing guest found'
##-----test phone form
track3 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
track4 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
guest2 = assign_guest_entry(site1,track3,form=pform)
guest2 = assign_guest_entry(site1,track4,form=pform)
cnt = Guest.query.count()
assert 2 == cnt, 'number of guest created is not 2 but :%s '%cnt
assert 1 == Guesttrack.query.get(3).loginstat.get('newguest'),\
'newguest is not set to 1 after new guest added'
assert None == Guesttrack.query.get(4).loginstat.get('newguest'),\
'newguest is not set to None after existing guest found'
##-----test FB profile
track5 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
track6 = init_track(site1,guestmac=randomMAC(),apmac=randomMAC())
guest1 = assign_guest_entry(site1,track5,fbprofile=profile)
guest1 = assign_guest_entry(site1,track6,fbprofile=profile)
cnt = Guest.query.count()
assert 3 == cnt, 'number of guest created is not 3 but :%s '%cnt
newguest = Guest.query.get(3)
assert '[email protected]' == newguest.email,'Wrong email '
assert '21-28' == newguest.agerange, 'Wrong age range'
assert 1 == Guesttrack.query.get(5).loginstat.get('newguest'),\
'newguest is not set to 1 after new guest added'
assert None == Guesttrack.query.get(6).loginstat.get('newguest'),\
'newguest is not set to None after existing guest found'
def test_validate_loginauth_usage(client,session):
site1 = Wifisite.query.get(1)
apmac = randomMAC()
mac = randomMAC()
#test creating a new track
track = init_track(site1,guestmac=mac,apmac=apmac)
loginauth = Loginauth(siteid=site1.id,deviceid=track.deviceid)
loginauth.save()
#timenow for refference
utcnow = arrow.utcnow()
#create bunch of sessions
for i in range(10):
#wtih unused sessions
days = -(i+1)
session = Guestsession(siteid=site1.id,deviceid=track.deviceid,
loginauthid=loginauth.id)
session.starttime = utcnow.replace(days=days).naive
session.data_used = 50
session.duration = 20
session.save()
#fake login config
class Loginconfig:
def __init__(self,time_limit,data_limit):
self.time_limit = time_limit
self.data_limit = data_limit
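    # Reading of the cases below (an interpretation inferred from the assertions,
    # not from validate_loginauth_usage's docs): sessions since `starttime` are
    # summed, duration against time_limit and data_used against data_limit
    # (0 meaning unlimited), and the remaining allowance is written back onto
    # the Loginauth row.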
#expired data
lconf = Loginconfig(100,50)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Expired datalimit not returning false'
#expired time
lconf = Loginconfig(20,500)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Expired timelimit not returning false'
#nonexpired
lconf = Loginconfig(200,500)
starttime = utcnow.replace(days=-2).naive
assert True == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
chkauth = Loginauth.query.get(1)
assert int(chkauth.data_limit) == 400,'datlimit is :%s instead of expected 400'%\
chkauth.data_limit
assert int(chkauth.time_limit) == 160,'time_limit is :%s instead of expected 160'%\
chkauth.time_limit
#unlimited data and limited time not expired
lconf = Loginconfig(50,0)
starttime = utcnow.replace(days=-2).naive
assert True == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
chkauth = Loginauth.query.get(1)
assert int(chkauth.data_limit) == 1000,'datlimit is :%s instead of expected 1000'%\
chkauth.data_limit
assert int(chkauth.time_limit) == 10,'time_limit is :%s instead of expected 10'%\
chkauth.time_limit
#unlimited data and limited time expired
lconf = Loginconfig(30,0)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
#unlimited time and limited data not expired
lconf = Loginconfig(0,300)
starttime = utcnow.replace(days=-2).naive
assert True == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
chkauth = Loginauth.query.get(1)
assert int(chkauth.data_limit) == 200,'datlimit is :%s instead of expected 200'%\
chkauth.data_limit
assert int(chkauth.time_limit) == 480,'time_limit is :%s instead of expected 480'%\
chkauth.time_limit
#unlimited time and limited data expired
lconf = Loginconfig(0,30)
starttime = utcnow.replace(days=-2).naive
assert False == validate_loginauth_usage(site1,track,lconf,
loginauth,starttime),'Non limits not returning True'
| agpl-3.0 | -1,459,336,541,446,399,200 | 39.241758 | 146 | 0.640419 | false |
PaloAltoNetworks/minemeld-core | minemeld/ft/ipop.py | 1 | 15103 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import netaddr
import uuid
import shutil
from . import base
from . import actorbase
from . import table
from . import st
from .utils import utc_millisec
from .utils import RESERVED_ATTRIBUTES
LOG = logging.getLogger(__name__)
WL_LEVEL = st.MAX_LEVEL
class MWUpdate(object):
def __init__(self, start, end, uuids):
self.start = start
self.end = end
self.uuids = set(uuids)
s = netaddr.IPAddress(start)
e = netaddr.IPAddress(end)
self._indicator = '%s-%s' % (s, e)
def indicator(self):
return self._indicator
def __repr__(self):
return 'MWUpdate('+self._indicator+', %r)' % self.uuids
def __hash__(self):
return hash(self._indicator)
def __eq__(self, other):
return self.start == other.start and \
self.end == other.end
class AggregateIPv4FT(actorbase.ActorBaseFT):
def __init__(self, name, chassis, config):
self.active_requests = []
super(AggregateIPv4FT, self).__init__(name, chassis, config)
def configure(self):
super(AggregateIPv4FT, self).configure()
self.whitelist_prefixes = self.config.get('whitelist_prefixes', [])
self.enable_list_merge = self.config.get('enable_list_merge', False)
def _initialize_tables(self, truncate=False):
self.table = table.Table(
self.name,
bloom_filter_bits=10,
truncate=truncate
)
self.table.create_index('_id')
self.st = st.ST(self.name+'_st', 32, truncate=truncate)
def initialize(self):
self._initialize_tables()
def rebuild(self):
self._initialize_tables(truncate=True)
def reset(self):
self._initialize_tables(truncate=True)
def _indicator_key(self, indicator, source):
return indicator+'\x00'+source
def _calc_indicator_value(self, uuids, additional_uuid=None, additional_value=None):
mv = {'sources': []}
for uuid_ in uuids:
if uuid_ == additional_uuid:
v = additional_value
else:
# uuid_ = str(uuid.UUID(bytes=uuid_))
k, v = next(
self.table.query('_id', from_key=uuid_, to_key=uuid_,
include_value=True),
(None, None)
)
if k is None:
LOG.error("Unable to find key associated with uuid: %s", uuid_)
for vk in v:
if vk in mv and vk in RESERVED_ATTRIBUTES:
mv[vk] = RESERVED_ATTRIBUTES[vk](mv[vk], v[vk])
else:
if self.enable_list_merge and vk in mv and isinstance(mv[vk], list):
if not isinstance(v[vk], list):
mv[vk] = v[vk]
else:
mv[vk].extend(v[vk])
else:
mv[vk] = v[vk]
return mv
def _merge_values(self, origin, ov, nv):
result = {'sources': []}
result['_added'] = ov['_added']
result['_id'] = ov['_id']
for k in nv.keys():
result[k] = nv[k]
return result
def _add_indicator(self, origin, indicator, value):
added = False
now = utc_millisec()
ik = self._indicator_key(indicator, origin)
v = self.table.get(ik)
if v is None:
v = {
'_id': str(uuid.uuid4()),
'_added': now
}
added = True
self.statistics['added'] += 1
v = self._merge_values(origin, v, value)
v['_updated'] = now
self.table.put(ik, v)
return v, added
def _calc_ipranges(self, start, end):
"""Calc IP Ranges overlapping the range between start and end
Args:
start (int): start of the range
end (int): end of the range
Returns:
set: set of ranges
"""
result = set()
# collect the endpoint between start and end
eps = set()
for epaddr, _, _, _ in self.st.query_endpoints(start=start, stop=end):
eps.add(epaddr)
eps = sorted(eps)
if len(eps) == 0:
return result
# walk thru the endpoints, tracking last endpoint
# current level, active segments and segments levels
oep = None
oeplevel = -1
live_ids = set()
slevels = {}
for epaddr in eps:
# for each endpoint we track which segments are starting
# and which ones are ending with that specific endpoint
end_ids = set()
start_ids = set()
eplevel = 0
for cuuid, clevel, cstart, cend in self.st.cover(epaddr):
slevels[cuuid] = clevel
if clevel > eplevel:
eplevel = clevel
if cstart == epaddr:
start_ids.add(cuuid)
if cend == epaddr:
end_ids.add(cuuid)
if cend != epaddr and cstart != epaddr:
if cuuid not in live_ids:
assert epaddr == eps[0]
live_ids.add(cuuid)
assert len(end_ids) + len(start_ids) > 0
if len(start_ids) != 0:
if oep is not None and oep != epaddr and len(live_ids) != 0:
if oeplevel != WL_LEVEL:
result.add(MWUpdate(oep, epaddr-1,
live_ids))
oep = epaddr
oeplevel = eplevel
live_ids = live_ids | start_ids
if len(end_ids) != 0:
if oep is not None and len(live_ids) != 0:
if eplevel < WL_LEVEL:
result.add(MWUpdate(oep, epaddr, live_ids))
oep = epaddr+1
live_ids = live_ids - end_ids
oeplevel = eplevel
if len(live_ids) != 0:
oeplevel = max([slevels[id_] for id_ in live_ids])
return result
def _range_from_indicator(self, indicator):
if '-' in indicator:
start, end = map(
lambda x: int(netaddr.IPAddress(x)),
indicator.split('-', 1)
)
elif '/' in indicator:
ipnet = netaddr.IPNetwork(indicator)
start = int(ipnet.ip)
end = start+ipnet.size-1
else:
start = int(netaddr.IPAddress(indicator))
end = start
if (not (start >= 0 and start <= 0xFFFFFFFF)) or \
(not (end >= 0 and end <= 0xFFFFFFFF)):
LOG.error('%s - {%s} invalid IPv4 indicator',
self.name, indicator)
return None, None
return start, end
def _endpoints_from_range(self, start, end):
"""Return last endpoint before range and first endpoint after range
Args:
start (int): range start
end (int): range stop
Returns:
tuple: (last endpoint before, first endpoint after)
"""
rangestart = next(
self.st.query_endpoints(start=0, stop=max(start-1, 0),
reverse=True),
None
)
if rangestart is not None:
rangestart = rangestart[0]
LOG.debug('%s - range start: %s', self.name, rangestart)
rangestop = next(
self.st.query_endpoints(reverse=False,
start=min(end+1, self.st.max_endpoint),
stop=self.st.max_endpoint,
include_start=False),
None
)
if rangestop is not None:
rangestop = rangestop[0]
LOG.debug('%s - range stop: %s', self.name, rangestop)
return rangestart, rangestop
@base._counting('update.processed')
def filtered_update(self, source=None, indicator=None, value=None):
vtype = value.get('type', None)
if vtype != 'IPv4':
self.statistics['update.ignored'] += 1
return
v, newindicator = self._add_indicator(source, indicator, value)
start, end = self._range_from_indicator(indicator)
if start is None or end is None:
return
level = 1
for p in self.whitelist_prefixes:
if source.startswith(p):
level = WL_LEVEL
break
LOG.debug("%s - update: indicator: (%s) %s %s level: %s",
self.name, indicator, start, end, level)
rangestart, rangestop = self._endpoints_from_range(start, end)
rangesb = set(self._calc_ipranges(rangestart, rangestop))
LOG.debug('%s - ranges before update: %s', self.name, rangesb)
if not newindicator and level != WL_LEVEL:
for u in rangesb:
self.emit_update(
u.indicator(),
self._calc_indicator_value(u.uuids)
)
return
uuidbytes = v['_id']
self.st.put(uuidbytes, start, end, level=level)
rangesa = set(self._calc_ipranges(rangestart, rangestop))
LOG.debug('%s - ranges after update: %s', self.name, rangesa)
added = rangesa-rangesb
LOG.debug("%s - IP ranges added: %s", self.name, added)
removed = rangesb-rangesa
LOG.debug("%s - IP ranges removed: %s", self.name, removed)
for u in added:
self.emit_update(
u.indicator(),
self._calc_indicator_value(u.uuids)
)
for u in rangesa - added:
for ou in rangesb:
if u == ou and len(u.uuids ^ ou.uuids) != 0:
LOG.debug("IP range updated: %s", repr(u))
self.emit_update(
u.indicator(),
self._calc_indicator_value(u.uuids)
)
for u in removed:
self.emit_withdraw(
u.indicator(),
value=self._calc_indicator_value(u.uuids)
)
@base._counting('withdraw.processed')
def filtered_withdraw(self, source=None, indicator=None, value=None):
LOG.debug("%s - withdraw from %s - %s", self.name, source, indicator)
if value is not None and value.get('type', None) != 'IPv4':
self.statistics['withdraw.ignored'] += 1
return
ik = self._indicator_key(indicator, source)
v = self.table.get(ik)
LOG.debug("%s - v: %s", self.name, v)
if v is None:
return
self.table.delete(ik)
self.statistics['removed'] += 1
start, end = self._range_from_indicator(indicator)
if start is None or end is None:
return
level = 1
for p in self.whitelist_prefixes:
if source.startswith(p):
level = WL_LEVEL
break
rangestart, rangestop = self._endpoints_from_range(start, end)
rangesb = set(self._calc_ipranges(rangestart, rangestop))
LOG.debug("ranges before: %s", rangesb)
uuidbytes = v['_id']
self.st.delete(uuidbytes, start, end, level=level)
rangesa = set(self._calc_ipranges(rangestart, rangestop))
LOG.debug("ranges after: %s", rangesa)
added = rangesa-rangesb
LOG.debug("IP ranges added: %s", added)
removed = rangesb-rangesa
LOG.debug("IP ranges removed: %s", removed)
for u in added:
self.emit_update(
u.indicator(),
self._calc_indicator_value(u.uuids)
)
for u in rangesa - added:
for ou in rangesb:
if u == ou and len(u.uuids ^ ou.uuids) != 0:
LOG.debug("IP range updated: %s", repr(u))
self.emit_update(
u.indicator(),
self._calc_indicator_value(u.uuids)
)
for u in removed:
self.emit_withdraw(
u.indicator(),
value=self._calc_indicator_value(
u.uuids,
additional_uuid=v['_id'],
additional_value=v
)
)
def _send_indicators(self, source=None, from_key=None, to_key=None):
if from_key is None:
from_key = 0
if to_key is None:
to_key = 0xFFFFFFFF
result = self._calc_ipranges(from_key, to_key)
for u in result:
self.do_rpc(
source,
"update",
indicator=u.indicator(),
value=self._calc_indicator_value(u.uuids)
)
def get(self, source=None, indicator=None):
if not type(indicator) in [str, unicode]:
raise ValueError("Invalid indicator type")
indicator = int(netaddr.IPAddress(indicator))
result = self._calc_ipranges(indicator, indicator)
if len(result) == 0:
return None
u = result.pop()
return self._calc_indicator_value(u.uuids)
def get_all(self, source=None):
self._send_indicators(source=source)
return 'OK'
def get_range(self, source=None, index=None, from_key=None, to_key=None):
if index is not None:
raise ValueError('Index not found')
if from_key is not None:
from_key = int(netaddr.IPAddress(from_key))
if to_key is not None:
to_key = int(netaddr.IPAddress(to_key))
self._send_indicators(
source=source,
from_key=from_key,
to_key=to_key
)
return 'OK'
def length(self, source=None):
return self.table.num_indicators
def stop(self):
super(AggregateIPv4FT, self).stop()
for g in self.active_requests:
g.kill()
self.active_requests = []
self.table.close()
LOG.info("%s - # indicators: %d", self.name, self.table.num_indicators)
@staticmethod
def gc(name, config=None):
actorbase.ActorBaseFT.gc(name, config=config)
shutil.rmtree(name, ignore_errors=True)
shutil.rmtree('{}_st'.format(name), ignore_errors=True)
| apache-2.0 | -2,142,277,314,815,049,000 | 30.01232 | 88 | 0.517579 | false |
giovannipro/map-the-glam | scraper/scrape/scrape_image_size-analogic.py | 1 | 9603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Get data from file page
import os # get file path
import webbrowser # open webpages
import time # get unix code
import datetime # convert in unix timestamp
import urllib, json, io # read json
from urllib import urlopen # open file
import sys # reset file encoding
import datetime # print time
import csv # read csv
import re # replace all occurrences
import pprint # pretty print
from bs4 import BeautifulSoup # parse html
from multiprocessing import Pool
# from multiprocessing import Process
reload(sys)
sys.setdefaultencoding("utf-8")
# -----------------------------------
# Utilities
folder = os.path.dirname(os.path.realpath(__file__))
t = "\t"
n = "\n"
s = " "
commons_page = "https://commons.wikimedia.org/wiki/"
def time():
my_format = "%d %m %Y %I:%M%p"
ts = datetime.datetime.utcnow().strftime(my_format)
print(ts)
def clean_url_a(title):
replace_01 = "?"
replace_02 = "&"
replace_03 = "ä"
replace_04 = "ö"
replace_06 = "("
replace_07 = ")"
replace_08 = ","
replace_10 = "…"
replace_11 = " "
replace_12 = "å"
replace_13 = "ü"
replace_14 = ","
replace_15 = "á"
replace_16 = '"'
replace_17 = '?'
# replace_09 = "-"
clean = title \
.replace(replace_01,"%3f") \
.replace(replace_02,"%26") \
.replace(replace_03,"%e4") \
.replace(replace_04,"%f6") \
.replace(replace_06,"%28") \
.replace(replace_07,"%29") \
.replace(replace_08,"%2c") \
.replace(replace_10,"%20") \
.replace(replace_11,"_") \
.replace(replace_12,"%e5") \
.replace(replace_13,"%fc") \
.replace(replace_14,"%2c") \
.replace(replace_15,"%e1") \
.replace(replace_16,"%22") \
.replace(replace_17,"%3f")
# .replace(replace_05,"%fc")
# .replace(replace_09,"%2d") \
return clean
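# Example of the escaping above (illustrative):
#   clean_url_a("Foo & Bar (1).jpg") -> "Foo_%26_Bar_%281%29.jpg"
# (spaces become "_", "&" becomes "%26", parentheses become "%28"/"%29").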
def clean_url_b(title):
replace_01 = "å"
replace_02 = "é"
replace_03 = "ô"
replace_04 = "è"
replace_05 = "_"
replace_06 = " "
replace_07 = '?'
replace_08 = '&'
clean = title \
.replace(replace_01,"ä") \
.replace(replace_02,"%e9") \
.replace(replace_03,"%f4") \
.replace(replace_04,"%e8") \
.replace(replace_05,"_") \
.replace(replace_06,"_") \
.replace(replace_07,"%3f") \
.replace(replace_07,"%26")
return clean
# -----------------------------------
# Script
def get_img_size_analogic(f_name,start_id):
# start = time.time()
# print(start)
func = "img_size_analogic"
index = 0
f_in = folder + "/data/" + f_name + ".tsv"
f_out = folder + "/data/" + f_name + "_" + func + "-output.tsv"
f_err = folder + "/data/" + f_name + "_" + func + "-errors.tsv"
with open(f_in, "r") as f1:
with open(f_out, "a") as f2:
with open(f_err, "a") as f3:
tsv_file = csv.reader(f1, delimiter="\t")
for file in tsv_file:
index += 1
file_id = file[0]
file_name = file[1]
# print(file_name)
if (index >= start_id):
try:
url = commons_page + file_name
html = urlopen(url)
bsObj = BeautifulSoup(html,"html.parser")
print(file_id)
with open(f_out, "a") as f:
try:
raw_data = bsObj.find("div",{"class":"commons-file-information-table"})
output = str(file_id) + t + file_name + t
f2.write(output)
f2.write("-" + t)
# try:
# value_1 = raw_data.findAll("tr")[1].findAll("td")[1].get_text().replace(n,s)
# #.split(s)[2] # h = dimension.split(s)[0]
# # print(value_1)
# f2.write(value_1 + t)
# except Exception as e:
# output = str(file_id) + t + commons_page+file_name + t + "error 3.1"
# # print(output + str(e))
# f2.write("-" + t)
# f3.write(output + n)
# pass
f2.write("-" + t)
# try:
# value_2 = raw_data.findAll("tr")[2].findAll("td")[1].get_text().replace(n,s)
# #.split(s)[2] # h = dimension.split(s)[0]
# # print(value_2)
# f3.write(value_2 + t)
# except Exception as e:
# output = str(file_id) + t + commons_page+file_name + t + "error 3.2"
# # print(output + str(e))
# f2.write("-" + t)
# f3.write(output + n)
# pass
try:
value_3 = raw_data.findAll("tr")[3].findAll("td")[1].get_text().replace(n,s)
#.split(s)[2] # h = dimension.split(s)[0]
# print(value_3)
f2.write(value_3 + t)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 3.3"
# print(output + str(e))
f2.write("-" + t)
f3.write(output + n)
try:
value_4 = raw_data.findAll("tr")[4].findAll("td")[1].get_text().replace(n,s)
#.split(s)[2] # h = dimension.split(s)[0]
# print(value_4)
f2.write(value_4 + t)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 3.4"
# print(output + str(e))
f2.write("-" + t)
f3.write(output + n)
pass
try:
value_5 = raw_data.findAll("tr")[5].findAll("td")[1].get_text().replace(n,s)
#.split(s)[2] # h = dimension.split(s)[0]
# print(value_5)
f2.write(value_5 + t)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 3.5"
# print(output + str(e))
f2.write("-" + t)
f3.write(output + n)
pass
f2.write("-" + t)
# try:
# value_6 = raw_data.findAll("tr")[6].findAll("td")[1].get_text().replace(n,s)
# #.split(s)[2] # h = dimension.split(s)[0]
# # print(value_6)
# f2.write(value_6 + t)
# except Exception as e:
# output = str(file_id) + t + commons_page+file_name + t + "error 3.6"
# # print(output + str(e))
# f2.write("-" + t)
# f3.write(output + n)
f2.write(n)
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 2"
print(e)
f3.write(output + n)
pass
except Exception as e:
output = str(file_id) + t + commons_page+file_name + t + "error 1"
print(e)
f3.write(output + n)
pass
# end = time()
# running_time = end - start
# print (running_time)
def get_medium(f_name,start_id):
# start = time.time()
# print(start)
func = "medium"
index = 0
print(func)
f_in = folder + "/data/" + f_name + ".tsv"
f_out = folder + "/data/" + f_name + "_" + func + "-output.tsv"
f_err = folder + "/data/" + f_name + "_" + func + "-errors.tsv"
with open(f_in, "r") as f1:
with open(f_out, "a") as f2:
with open(f_err, "a") as f3:
tsv_file = csv.reader(f1, delimiter="\t")
for file in tsv_file:
index += 1
file_id = file[0]
file_name = file[1]
# print(file_name)
if (index >= start_id):
try:
url = commons_page + file_name
html = urlopen(url)
bsObj = BeautifulSoup(html,"html.parser")
print(file_id)
with open(f_out, "a") as f:
try:
raw_data = bsObj.find("div",{"class":"commons-file-information-table"})
output = str(file_id) + t + file_name + t
# print(output)
f2.write(output)
except Exception as e:
output = str(file_id) + t + commons_page + file_name + t + "error_1"
f3.write(output)
pass
try:
row = raw_data.findAll("tr")[1]
val = row.findAll("td")[0].get_text().replace(n,s)
value_1 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_1
print(value_1)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[2]
val = row.findAll("td")[0].get_text().replace(n,s)
value_2 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_2
print(value_2)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[3]
val = row.findAll("td")[0].get_text().replace(n,s)
value_3 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_3
print(value_3)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[4]
val = row.findAll("td")[0].get_text().replace(n,s)
value_4 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_4
print(value_4)
f2.write(output + n)
except Exception as e:
pass
try:
row = raw_data.findAll("tr")[5]
val = row.findAll("td")[0].get_text().replace(n,s)
value_5 = row.findAll("td")[1].get_text().replace(n,s)
if val == "Medium":
output = val + t + value_5
print(value_5)
f2.write(output + n)
except Exception as e:
pass
except Exception as e:
output = str(file_id) + t + commons_page + file_name + t + "error 1"
print(e)
# f3.write(output + n)
pass
# end = time()
# running_time = end - start
# print (running_time)
# -----------------------------------
# Launch scripts
# get_img_size_analogic("test",48766);
get_medium("file_medium",0);
| gpl-3.0 | -1,088,663,877,356,968,000 | 26.09322 | 88 | 0.512043 | false |
tom-mi/pyrad | pyrad/server.py | 1 | 9027 | # server.py
#
# Copyright 2003-2004,2007 Wichert Akkerman <[email protected]>
import select
import socket
from pyrad import host
from pyrad import packet
import logging
logger = logging.getLogger('pyrad')
class RemoteHost:
"""Remote RADIUS capable host we can talk to.
"""
def __init__(self, address, secret, name, authport=1812, acctport=1813):
"""Constructor.
:param address: IP address
:type address: string
:param secret: RADIUS secret
:type secret: string
:param name: short name (used for logging only)
:type name: string
:param authport: port used for authentication packets
:type authport: integer
:param acctport: port used for accounting packets
:type acctport: integer
"""
self.address = address
self.secret = secret
self.authport = authport
self.acctport = acctport
self.name = name
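# Minimal usage sketch (illustrative only; the subclass, secret, client address and
# dictionary path are made-up values, Dictionary stands for pyrad.dictionary.Dictionary,
# and Run() refers to the request loop of the complete pyrad Server class, which is
# not shown in this excerpt):
#   class MyServer(Server):
#       def HandleAuthPacket(self, pkt):
#           reply = self.CreateReplyPacket(pkt, **{'Reply-Message': 'hello'})
#           reply.code = packet.AccessAccept
#           self.SendReplyPacket(pkt.fd, reply)
#   srv = MyServer(addresses=['0.0.0.0'], dict=Dictionary('dictionary'))
#   srv.hosts['10.0.0.1'] = RemoteHost('10.0.0.1', 'testing123', 'nas01')
#   srv.Run()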
class ServerPacketError(Exception):
"""Exception class for bogus packets.
ServerPacketError exceptions are only used inside the Server class to
abort processing of a packet.
"""
class Server(host.Host):
"""Basic RADIUS server.
This class implements the basics of a RADIUS server. It takes care
of the details of receiving and decoding requests; processing of
the requests should be done by overloading the appropriate methods
in derived classes.
:ivar hosts: hosts who are allowed to talk to us
:type hosts: dictionary of Host class instances
:ivar _poll: poll object for network sockets
:type _poll: select.poll class instance
:ivar _fdmap: map of filedescriptors to network sockets
:type _fdmap: dictionary
:cvar MaxPacketSize: maximum size of a RADIUS packet
:type MaxPacketSize: integer
"""
MaxPacketSize = 8192
def __init__(self, addresses=[], authport=1812, acctport=1813, hosts=None,
dict=None, auto_crypt=False):
"""Constructor.
:param addresses: IP addresses to listen on
:type addresses: sequence of strings
:param authport: port to listen on for authentication packets
:type authport: integer
:param acctport: port to listen on for accounting packets
:type acctport: integer
:param hosts: hosts who we can talk to
:type hosts: dictionary mapping IP to RemoteHost class instances
:param dict: RADIUS dictionary to use
:type dict: Dictionary class instance
"""
host.Host.__init__(self, authport, acctport, dict)
if hosts is None:
self.hosts = {}
else:
self.hosts = hosts
self.authfds = []
self.acctfds = []
for addr in addresses:
self.BindToAddress(addr)
self.auto_crypt = auto_crypt
self.running = True
def BindToAddress(self, addr):
"""Add an address to listen to.
        An empty string indicates you want to listen on all addresses.
:param addr: IP address to listen on
:type addr: string
"""
authfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
authfd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
authfd.bind((addr, self.authport))
acctfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
acctfd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
acctfd.bind((addr, self.acctport))
self.authfds.append(authfd)
self.acctfds.append(acctfd)
def HandleAuthPacket(self, pkt):
"""Authentication packet handler.
This is an empty function that is called when a valid
        authentication packet has been received. It can be overridden in
derived classes to add custom behaviour.
:param pkt: packet to process
:type pkt: Packet class instance
"""
def HandleAcctPacket(self, pkt):
"""Accounting packet handler.
This is an empty function that is called when a valid
        accounting packet has been received. It can be overridden in
derived classes to add custom behaviour.
:param pkt: packet to process
:type pkt: Packet class instance
"""
def _HandleAuthPacket(self, pkt):
"""Process a packet received on the authentication port.
If this packet should be dropped instead of processed a
ServerPacketError exception should be raised. The main loop will
drop the packet and log the reason.
:param pkt: packet to process
:type pkt: Packet class instance
"""
if pkt.source[0] not in self.hosts:
msg = 'Received packet from unknown host ({})'.format(pkt.source[0])
raise ServerPacketError(msg)
pkt.secret = self.hosts[pkt.source[0]].secret
if pkt.code != packet.AccessRequest:
raise ServerPacketError(
'Received non-authentication packet on authentication port')
self.HandleAuthPacket(pkt)
def _HandleAcctPacket(self, pkt):
"""Process a packet received on the accounting port.
If this packet should be dropped instead of processed a
ServerPacketError exception should be raised. The main loop will
drop the packet and log the reason.
:param pkt: packet to process
:type pkt: Packet class instance
"""
if pkt.source[0] not in self.hosts:
raise ServerPacketError('Received packet from unknown host')
pkt.secret = self.hosts[pkt.source[0]].secret
        if pkt.code not in [packet.AccountingRequest,
                            packet.AccountingResponse]:
raise ServerPacketError(
'Received non-accounting packet on accounting port')
self.HandleAcctPacket(pkt)
def _GrabPacket(self, pktgen, fd):
"""Read a packet from a network connection.
        This method assumes there is data waiting to be read.
:param fd: socket to read packet from
:type fd: socket class instance
:return: RADIUS packet
:rtype: Packet class instance
"""
(data, source) = fd.recvfrom(self.MaxPacketSize)
pkt = pktgen(data)
pkt.source = source
pkt.fd = fd
return pkt
def _PrepareSockets(self):
"""Prepare all sockets to receive packets.
"""
for fd in self.authfds + self.acctfds:
self._fdmap[fd.fileno()] = fd
self._poll.register(fd.fileno(),
select.POLLIN | select.POLLPRI | select.POLLERR)
self._realauthfds = list(map(lambda x: x.fileno(), self.authfds))
self._realacctfds = list(map(lambda x: x.fileno(), self.acctfds))
def CreateReplyPacket(self, pkt, **attributes):
"""Create a reply packet.
Create a new packet which can be returned as a reply to a received
packet.
:param pkt: original packet
:type pkt: Packet instance
"""
reply = pkt.CreateReply(**attributes)
reply.source = pkt.source
return reply
def _ProcessInput(self, fd):
"""Process available data.
If this packet should be dropped instead of processed a
PacketError exception should be raised. The main loop will
drop the packet and log the reason.
This function calls either HandleAuthPacket() or
HandleAcctPacket() depending on which socket is being
processed.
:param fd: socket to read packet from
:type fd: socket class instance
"""
if fd.fileno() in self._realauthfds:
pkt = self._GrabPacket(lambda data, s=self:
s.CreateAuthPacket(packet=data,
auto_crypt=self.auto_crypt), fd)
self._HandleAuthPacket(pkt)
else:
pkt = self._GrabPacket(lambda data, s=self:
s.CreateAcctPacket(packet=data), fd)
self._HandleAcctPacket(pkt)
def Stop(self):
self.running = False
def Run(self):
"""Main loop.
This method is the main loop for a RADIUS server. It waits
for packets to arrive via the network and calls other methods
to process them.
"""
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
while self.running:
for (fd, event) in self._poll.poll(10):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except ServerPacketError as err:
logger.info('Dropping packet: ' + str(err))
except packet.PacketError as err:
logger.info('Received a broken packet: ' + str(err))
else:
logger.error('Unexpected event in server main loop')
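# A minimal usage sketch (illustrative only, not part of this module; the
# subclass name, address, secret and dictionary path are assumptions):
#
#   from pyrad import dictionary, packet, server
#
#   class MyAuthServer(server.Server):
#       def HandleAuthPacket(self, pkt):
#           reply = self.CreateReplyPacket(pkt)
#           reply.code = packet.AccessAccept
#           self.SendReplyPacket(pkt.fd, reply)
#
#   srv = MyAuthServer(addresses=['127.0.0.1'],
#                      dict=dictionary.Dictionary('dictionary'))
#   srv.hosts['127.0.0.1'] = server.RemoteHost('127.0.0.1', 'secret',
#                                              'localhost')
#   srv.Run()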
| bsd-3-clause | -1,549,894,543,043,070,200 | 34.4 | 80 | 0.611056 | false |
unioslo/cerebrum | contrib/exchange/exchange_group_state_verification.py | 1 | 22909 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2018 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Script that checks the state of dist.groups between Cerebrum and Exchange.
This is done by:
- Pulling out all related attributes from Exchange, via LDAP.
- Pulling out all related information from Cerebrum, via API.
- Compare the two above.
- Send a report by mail/file.
"""
import argparse
import itertools
import logging
import pickle
import time
import ldap
from six import text_type
import cereconf
import eventconf
import Cerebrum.logutils
import Cerebrum.logutils.options
from Cerebrum import Utils
from Cerebrum.Utils import Factory
from Cerebrum.Utils import read_password
from Cerebrum.modules.Email import EmailAddress
from Cerebrum.modules.exchange.CerebrumUtils import CerebrumUtils
from Cerebrum.utils.email import sendmail
from Cerebrum.utils.ldaputils import decode_attrs
logger = logging.getLogger(__name__)
def text_decoder(encoding, allow_none=True):
def to_text(value):
if allow_none and value is None:
return None
if isinstance(value, bytes):
return value.decode(encoding)
return text_type(value)
return to_text
class StateChecker(object):
"""Wrapper class for state-checking functions.
The StateChecker class wraps all the functions we need in order to
verify and report deviances between Cerebrum and Exchange.
"""
# Connect params
LDAP_RETRY_DELAY = 60
LDAP_RETRY_MAX = 5
# Search and result params
LDAP_COM_DELAY = 30
LDAP_COM_MAX = 3
def __init__(self, conf):
"""Initzialize a new instance of out state-checker.
:param logger logger: The logger to use.
:param dict conf: Our StateCheckers configuration.
"""
self.db = Factory.get('Database')(client_encoding='UTF-8')
self.co = Factory.get('Constants')(self.db)
self.dg = Factory.get('DistributionGroup')(self.db)
self.ac = Factory.get('Account')(self.db)
self.gr = Factory.get('Group')(self.db)
self.et = Factory.get('EmailTarget')(self.db)
self.ea = EmailAddress(self.db)
self.ut = CerebrumUtils()
self.config = conf
self._ldap_page_size = 1000
def u(self, db_value):
""" Decode bytestring from database. """
if isinstance(db_value, bytes):
return db_value.decode(self.db.encoding)
return text_type(db_value)
def init_ldap(self):
"""Initzialize LDAP connection."""
self.ldap_srv = ldap.ldapobject.ReconnectLDAPObject(
'%s://%s/' % (self.config['ldap_proto'],
self.config['ldap_server']),
retry_max=self.LDAP_RETRY_MAX,
retry_delay=self.LDAP_RETRY_DELAY)
usr = self.config['ldap_user'].split('\\')[1]
self.ldap_srv.bind_s(
self.config['ldap_user'],
read_password(usr, self.config['ldap_server']))
self.ldap_lc = ldap.controls.SimplePagedResultsControl(
True, self._ldap_page_size, '')
def _searcher(self, ou, scope, attrs, ctrls):
""" Perform ldap.search(), but retry in the event of an error.
This wraps the search with error handling, so that the search is
repeated with a delay between attempts.
"""
for attempt in itertools.count(1):
try:
return self.ldap_srv.search_ext(
ou, scope, attrlist=attrs, serverctrls=ctrls)
except ldap.LDAPError as e:
if attempt < self.LDAP_COM_MAX:
logger.debug('Caught %r in _searcher on attempt %d',
e, attempt)
time.sleep(self.LDAP_COM_DELAY)
continue
raise
def _recvr(self, msgid):
""" Perform ldap.result3(), but retry in the event of an error.
This wraps the result fetching with error handling, so that the fetch
is repeated with a delay between attempts.
It also decodes all attributes and attribute text values.
"""
for attempt in itertools.count(1):
try:
# return self.ldap_srv.result3(msgid)
rtype, rdata, rmsgid, sc = self.ldap_srv.result3(msgid)
return rtype, decode_attrs(rdata), rmsgid, sc
except ldap.LDAPError as e:
if attempt < self.LDAP_COM_MAX:
logger.debug('Caught %r in _recvr on attempt %d',
e, attempt)
time.sleep(self.LDAP_COM_DELAY)
continue
raise
# This is a paging searcher, that should be used for large amounts of data
def search(self, ou, attrs, scope=ldap.SCOPE_SUBTREE):
"""Wrapper for the search- and result-calls.
Implements paged searching.
:param str ou: The OU to search in.
:param list attrs: The attributes to fetch.
:param int scope: Our search scope, default is subtree.
:rtype: list
:return: List of objects.
"""
# Implementing paging, taken from
# http://www.novell.com/coolsolutions/tip/18274.html
msgid = self._searcher(ou, scope, attrs, [self.ldap_lc])
data = []
ctrltype = ldap.controls.SimplePagedResultsControl.controlType
while True:
time.sleep(1)
rtype, rdata, rmsgid, sc = self._recvr(msgid)
data.extend(rdata)
pctrls = [c for c in sc if c.controlType == ctrltype]
if pctrls:
cookie = pctrls[0].cookie
if cookie:
self.ldap_lc.cookie = cookie
time.sleep(1)
msgid = self._searcher(ou, scope, attrs, [self.ldap_lc])
else:
break
else:
logger.warn('Server ignores RFC 2696 control.')
break
# Skip the OU itself, only return objects in the OU
return data[1:]
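    # Illustrative note on the paging loop above (values are hypothetical):
    # each result carries a SimplePagedResultsControl whose cookie is opaque
    # server-side state; a non-empty cookie means "more pages remain", so it
    # is copied back into self.ldap_lc and the search is re-issued, while an
    # empty cookie ends the loop after at most self._ldap_page_size entries
    # per round trip.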
# This search wrapper should be used for fetching members
def member_searcher(self, dn, scope, attrs):
"""Utility method for searching for group members.
:param str dn: The groups distinguished name.
:param int scope: Which scope to search by, should be BASE.
:param list attrs: A list of attributes to fetch.
:rtype: tuple
:return: The return-type and the result.
"""
# Wrapping the search, try three times
for attempt in itertools.count(1):
try:
# Search
msgid = self.ldap_srv.search(dn, scope, attrlist=attrs)
# Fetch
rtype, r = self.ldap_srv.result(msgid)
return rtype, r
except ldap.LDAPError as e:
if attempt < self.LDAP_COM_MAX:
logger.debug('Caught %r in member_searcher on attempt %d',
e, attempt)
time.sleep(self.LDAP_COM_DELAY)
continue
raise
    # We need a special function to pull out all the members of a group,
    # since AD LDAP forces us to fetch the member attribute in ranges.
def collect_members(self, dn):
"""Fetch a groups members.
This method picks out members in slices, since AD LDAP won't give us
more than 1500 users at a time. If the range-part of the attribute name
ends with a star, we know that we need to look for more members...
:param str dn: The groups distinguished name.
:rtype: list
:return: A list of the members.
"""
# We are searching trough a range. 0 is the start point.
low = str(0)
members = []
end = False
while not end:
# * means that we search for as many attributes as possible, from
# the start point defined by the low-param
attr = ['member;range=%s-*' % low]
# Search'n fetch
time.sleep(1) # Be polite
rtype, r = self.member_searcher(dn, ldap.SCOPE_BASE, attr)
            # If this is empty, no members exist; stop here.
if not r[0][1]:
end = True
break
# Dig out the data
r = r[0][1]
# Extract key
            key = list(r.keys())[0]
# Store members
members.extend(r[key])
# If so, we have reached the end of the range
# (i.e. key is 'member;range=7500-*')
if '*' in key:
end = True
# Extract the new start point from the key
# (i.e. key is 'member;range=0-1499')
else:
low = str(int(key.split('-')[-1]) + 1)
return members
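    # Illustrative sketch of the range retrieval above (attribute names are
    # hypothetical examples): AD returns the member attribute in slices such
    # as 'member;range=0-1499', then 'member;range=1500-2999', and finally
    # 'member;range=3000-*'; the trailing '*' marks the last slice, at which
    # point the loop stops and all slices have been concatenated into
    # `members`.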
def close(self):
"""Close the connection to the LDAP server."""
self.ldap_srv.unbind_s()
###
# Group related fetching & comparison
###
def collect_exchange_group_info(self, group_ou):
"""Collect group-information from Exchange, via LDAP.
:param str group_ou: The OrganizationalUnit to search for groups.
:rtype: dict
:return: A dict with the group attributes. The key is the group name.
"""
attrs = ['displayName',
'info',
'proxyAddresses',
'msExchHideFromAddressLists']
r = self.search(group_ou, attrs)
ret = {}
for cn, data in r:
tmp = {}
name = cn[3:].split(',')[0]
for key in data:
if key == 'info':
tmp[u'Description'] = data[key][0]
elif key == 'displayName':
tmp[u'DisplayName'] = data[key][0]
elif key == 'proxyAddresses':
addrs = []
for addr in data[key]:
if addr.startswith('SMTP:'):
tmp[u'Primary'] = addr[5:]
# TODO: Correct var?
if (cereconf.EXCHANGE_DEFAULT_ADDRESS_PLACEHOLDER not
in addr):
addrs.append(addr[5:])
tmp[u'Aliases'] = sorted(addrs)
elif key == 'managedBy':
tmp_man = data[key][0][3:].split(',')[0]
if tmp_man == 'Default group moderator':
tmp_man = u'groupadmin'
tmp[u'ManagedBy'] = [tmp_man]
# Skip reporting memberships for roomlists, since we don't manage
# those memberships.
# TODO: Generalize this
if name.startswith('rom-'):
tmp['Members'] = []
else:
                # Extract the member CN values from the member DNs.
tmp['Members'] = sorted([m[3:].split(',')[0] for m in
self.collect_members(cn)])
            # A missing attribute means that the value is false.
if 'msExchHideFromAddressLists' in data:
tmp_key = 'msExchHideFromAddressLists'
tmp[u'HiddenFromAddressListsEnabled'] = (
True if data[tmp_key][0] == 'TRUE' else False)
else:
tmp[u'HiddenFromAddressListsEnabled'] = False
ret[name] = tmp
return ret
def collect_cerebrum_group_info(self, mb_spread, ad_spread):
"""Collect distgroup related information from Cerebrum.
:param int/str mb_spread: Spread of mailboxes in exchange.
:param int/str ad_spread: Spread of accounts in AD.
:rtype: dict
:return: A dict of users attributes. Uname is key.
"""
mb_spread = self.co.Spread(mb_spread)
ad_spread = self.co.Spread(ad_spread)
u = text_decoder(self.db.encoding)
def _true_or_false(val):
# Yes, we know...
if val == 'T':
return True
elif val == 'F':
return False
else:
return None
tmp = {}
for dg in self.dg.list_distribution_groups():
self.dg.clear()
self.dg.find(dg['group_id'])
roomlist = _true_or_false(self.dg.roomlist)
data = self.dg.get_distgroup_attributes_and_targetdata(
roomlist=roomlist)
tmp[u(self.dg.group_name)] = {
u'Description': u(self.dg.description),
u'DisplayName': u(data['displayname']),
}
if not roomlist:
# Split up the moderated by field, and resolve group members
# from groups if there are groups in the moderated by field!
tmp[u(self.dg.group_name)].update({
u'HiddenFromAddressListsEnabled':
_true_or_false(data['hidden']),
u'Primary': u(data['primary']),
u'Aliases': [u(v) for v in sorted(data['aliases'])]
})
# Collect members
membs_unfiltered = self.ut.get_group_members(
self.dg.entity_id,
spread=mb_spread,
filter_spread=ad_spread
)
members = [u(member['name']) for member in membs_unfiltered]
tmp[u(self.dg.group_name)].update({u'Members': sorted(members)})
return tmp
def compare_group_state(self, ex_group_info, cere_group_info, state,
config):
"""Compare the information fetched from Cerebrum and Exchange.
This method produces a dict with the state between the systems,
and a report that will be sent to the appropriate target system
administrators.
:param dict ex_state: The state in Exchange.
:param dict ce_state: The state in Cerebrum.
:param dict state: The previous state generated by this method.
:param dict config: Configuration of reporting delays for various
attributes.
:rtype: tuple
:return: A tuple consisting of the new difference-state and a
human-readable report of differences.
"""
s_ce_keys = set(cere_group_info.keys())
s_ex_keys = set(ex_group_info.keys())
diff_group = {}
diff_stale = {}
diff_new = {}
##
# Populate some structures with information we need
# Groups in Exchange, but not in Cerebrum
stale_keys = list(s_ex_keys - s_ce_keys)
for ident in stale_keys:
if state and ident in state['stale_group']:
diff_stale[ident] = state['stale_group'][ident]
else:
diff_stale[ident] = time.time()
# Groups in Cerebrum, but not in Exchange
new_keys = list(s_ce_keys - s_ex_keys)
for ident in new_keys:
if state and ident in state['new_group']:
diff_new[ident] = state['new_group'][ident]
else:
diff_new[ident] = time.time()
        # Check groups that exist in both Cerebrum and Exchange for
        # differences (& is set intersection, in case you wondered). If an
        # attribute is not in its desired state in both this and the last run,
        # save the timestamp from the last run. This is used for calculating
        # when we nag someone about attributes not being in sync.
for key in s_ex_keys & s_ce_keys:
for attr in cere_group_info[key]:
tmp = {}
if state and key in state['group'] and \
attr in state['group'][key]:
t_0 = state['group'][key][attr][u'Time']
else:
t_0 = time.time()
if attr not in ex_group_info[key]:
tmp = {
u'Exchange': None,
u'Cerebrum': cere_group_info[key][attr],
u'Time': t_0
}
elif cere_group_info[key][attr] != ex_group_info[key][attr]:
tmp = {
u'Exchange': ex_group_info[key][attr],
u'Cerebrum': cere_group_info[key][attr],
u'Time': t_0
}
if tmp:
diff_group.setdefault(key, {})[attr] = tmp
ret = {
'new_group': diff_new,
'stale_group': diff_stale,
'group': diff_group,
}
if not state:
return ret, []
now = time.time()
# By now, we have three different dicts. Loop trough them and check if
# we should report 'em
report = ['\n\n# Group Attribute Since Cerebrum_value:Exchange_value']
# Report attribute mismatches for groups
for key in diff_group:
for attr in diff_group[key]:
delta = (config.get(attr) if attr in config else
config.get('UndefinedAttribute'))
if diff_group[key][attr][u'Time'] < now - delta:
t = time.strftime(u'%d%m%Y-%H:%M', time.localtime(
diff_group[key][attr][u'Time']))
if attr in (u'Aliases', u'Members',):
# We report the difference for these types, for
                        # readability
s_ce_attr = set(diff_group[key][attr][u'Cerebrum'])
try:
s_ex_attr = set(diff_group[key][attr][u'Exchange'])
except TypeError:
s_ex_attr = set([])
new_attr = list(s_ce_attr - s_ex_attr)
stale_attr = list(s_ex_attr - s_ce_attr)
if new_attr == stale_attr:
continue
tmp = u'%-10s %-30s %s +%s:-%s' % (key, attr, t,
str(new_attr),
str(stale_attr))
else:
tmp = u'%-10s %-30s %s %s:%s' % (
key, attr, t,
repr(diff_group[key][attr][u'Cerebrum']),
repr(diff_group[key][attr][u'Exchange']))
report += [tmp]
# Report uncreated groups
report += ['\n# Uncreated groups (uname, time)']
attr = 'UncreatedGroup'
delta = (config.get(attr) if attr in config else
config.get('UndefinedAttribute'))
for key in diff_new:
if diff_new[key] < now - delta:
t = time.strftime(u'%d%m%Y-%H:%M', time.localtime(
diff_new[key]))
report += [u'%-10s uncreated_group %s' % (key, t)]
# Report stale groups
report += ['\n# Stale groups (uname, time)']
attr = 'StaleGroup'
delta = (config.get(attr) if attr in config else
config.get('UndefinedAttribute'))
for key in diff_stale:
t = time.strftime(u'%d%m%Y-%H:%M', time.localtime(
diff_stale[key]))
if diff_stale[key] < now - delta:
report += [u'%-10s stale_group %s' % (key, t)]
return ret, report
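    # Illustrative shape of the returned state (group names and timestamps
    # are made up):
    #   {'new_group': {'grp-new': 1500000000.0},
    #    'stale_group': {},
    #    'group': {'grp-x': {'Members': {'Cerebrum': [...], 'Exchange': [...],
    #                                    'Time': 1500000000.0}}}}
    # The timestamps are carried over between runs, so a deviation is only
    # reported once it has persisted longer than the configured delay.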
def eventconf_type(value):
try:
return eventconf.CONFIG[value]
except KeyError as e:
raise ValueError(e)
def main(inargs=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--type',
dest='config',
type=eventconf_type,
required=True,
help="Sync type (a valid entry in eventconf.CONFIG)")
parser.add_argument(
'-f', '--file',
dest='state',
required=True,
help="read and write state to %(metavar)s")
parser.add_argument(
'-m', '--mail',
help="Send reports to %(metavar)s")
parser.add_argument(
'-s', '--sender',
help="Send reports from %(metavar)s")
parser.add_argument(
'-r', '--report-file',
dest='report',
help="Write the report to %(metavar)s")
Cerebrum.logutils.options.install_subparser(parser)
args = parser.parse_args(inargs)
if bool(args.mail) ^ bool(args.sender):
raise ValueError("Must give both mail and sender")
Cerebrum.logutils.autoconf('cronjob', args)
attr_config = args.config['state_check_conf']
group_ou = args.config['group_ou']
try:
with open(args.state, 'r') as f:
state = pickle.load(f)
except IOError:
logger.warn('No existing state file %s', args.state)
state = None
sc = StateChecker(args.config)
# Collect group info from Cerebrum and Exchange
sc.init_ldap()
ex_group_info = sc.collect_exchange_group_info(group_ou)
sc.close()
cere_group_info = sc.collect_cerebrum_group_info(
args.config['mailbox_spread'],
args.config['ad_spread'])
# Compare group state
new_state, report = sc.compare_group_state(ex_group_info,
cere_group_info,
state,
attr_config)
try:
rep = u'\n'.join(report)
except UnicodeError as e:
logger.warn('Bytestring data in report: %r', e)
tmp = []
for x in report:
tmp.append(x.decode('UTF-8'))
rep = u'\n'.join(tmp)
# Send a report by mail
if args.mail and args.sender:
sendmail(args.mail, args.sender,
'Exchange group state report',
rep.encode('utf-8'))
# Write report to file
if args.report:
with open(args.report, 'w') as f:
f.write(rep.encode('utf-8'))
with open(args.state, 'w') as f:
pickle.dump(new_state, f)
if __name__ == '__main__':
main()
| gpl-2.0 | 7,390,959,573,923,553,000 | 35.713141 | 79 | 0.533677 | false |
domeger/SplunkTAforPuppetEnterprise | bin/puppet_enterprise_metrics.py | 1 | 4586 | import splunktaforpuppetenterprise_declare
import os
import sys
import time
import datetime
import json
import modinput_wrapper.base_modinput
from solnlib.packages.splunklib import modularinput as smi
import input_module_puppet_enterprise_metrics as input_module
bin_dir = os.path.basename(__file__)
'''
Do not edit this file!!!
This file is generated by Add-on builder automatically.
Add your modular input logic to file input_module_puppet_enterprise_metrics.py
'''
class ModInputpuppet_enterprise_metrics(modinput_wrapper.base_modinput.BaseModInput):
def __init__(self):
if 'use_single_instance_mode' in dir(input_module):
use_single_instance = input_module.use_single_instance_mode()
else:
use_single_instance = False
super(ModInputpuppet_enterprise_metrics, self).__init__("splunktaforpuppetenterprise", "puppet_enterprise_metrics", use_single_instance)
self.global_checkbox_fields = None
def get_scheme(self):
"""overloaded splunklib modularinput method"""
scheme = super(ModInputpuppet_enterprise_metrics, self).get_scheme()
scheme.title = ("Puppet Enterprise Metrics")
scheme.description = ("Go to the add-on\'s configuration UI and configure modular inputs under the Inputs menu.")
scheme.use_external_validation = True
scheme.streaming_mode_xml = True
scheme.add_argument(smi.Argument("name", title="Name",
description="",
required_on_create=True))
"""
For customized inputs, hard code the arguments here to hide argument detail from users.
For other input types, arguments should be get from input_module. Defining new input types could be easier.
"""
scheme.add_argument(smi.Argument("token_", title="Token:",
description="curl -k -X POST -H \'Content-Type: application/json\' -d \'{\"login\": \"\", \"password\": \"\",\"lifetime\": \"9y\" }\' https://$:4433/rbac-api/v1/auth/token",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("puppet_enterprise_server_", title="Puppet Enterprise Server:",
description="Put in your FQDN of your Puppet Enterprise Server so the links backs on the dashboards work correctly.",
required_on_create=False,
required_on_edit=False))
scheme.add_argument(smi.Argument("server_", title="Server:",
description="Input your Puppet Enterprise Server address.",
required_on_create=True,
required_on_edit=False))
scheme.add_argument(smi.Argument("port_", title="Port:",
description="Input your Puppet Enterprise DB Port (HTTPS 8081, HTTP: 8080)",
required_on_create=True,
required_on_edit=False))
return scheme
def get_app_name(self):
return "SplunkTAforPuppetEnterprise"
def validate_input(self, definition):
"""validate the input stanza"""
input_module.validate_input(self, definition)
def collect_events(self, ew):
"""write out the events"""
input_module.collect_events(self, ew)
def get_account_fields(self):
account_fields = []
return account_fields
def get_checkbox_fields(self):
checkbox_fields = []
return checkbox_fields
def get_global_checkbox_fields(self):
if self.global_checkbox_fields is None:
checkbox_name_file = os.path.join(bin_dir, 'global_checkbox_param.json')
try:
if os.path.isfile(checkbox_name_file):
with open(checkbox_name_file, 'r') as fp:
self.global_checkbox_fields = json.load(fp)
else:
self.global_checkbox_fields = []
except Exception as e:
self.log_error('Get exception when loading global checkbox parameter names. ' + str(e))
self.global_checkbox_fields = []
return self.global_checkbox_fields
if __name__ == "__main__":
exitcode = ModInputpuppet_enterprise_metrics().run(sys.argv)
sys.exit(exitcode)
| apache-2.0 | -8,698,139,154,685,138,000 | 43.960784 | 214 | 0.586786 | false |
EiNSTeiN-/deluge-gtk3 | deluge/ui/gtkui/edittrackersdialog.py | 1 | 9157 | #
# edittrackersdialog.py
#
# Copyright (C) 2007, 2008 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from gi.repository import Gtk
import pkg_resources
import deluge.common
import common
from deluge.ui.client import client
import deluge.component as component
from deluge.log import LOG as log
class EditTrackersDialog:
def __init__(self, torrent_id, parent=None):
self.torrent_id = torrent_id
self.glade = Gtk.Builder()
self.glade.add_from_file(
pkg_resources.resource_filename("deluge.ui.gtkui",
"builder/edit_trackers.ui"))
self.dialog = self.glade.get_object("edit_trackers_dialog")
self.treeview = self.glade.get_object("tracker_treeview")
self.add_tracker_dialog = self.glade.get_object("add_tracker_dialog")
self.add_tracker_dialog.set_transient_for(self.dialog)
self.edit_tracker_entry = self.glade.get_object("edit_tracker_entry")
self.edit_tracker_entry.set_transient_for(self.dialog)
self.dialog.set_icon(common.get_deluge_icon())
        if parent is not None:
self.dialog.set_transient_for(parent)
# Connect the signals
self.glade.connect_signals({
"on_button_up_clicked": self.on_button_up_clicked,
"on_button_add_clicked": self.on_button_add_clicked,
"on_button_edit_clicked": self.on_button_edit_clicked,
"on_button_edit_cancel_clicked": self.on_button_edit_cancel_clicked,
"on_button_edit_ok_clicked": self.on_button_edit_ok_clicked,
"on_button_remove_clicked": self.on_button_remove_clicked,
"on_button_down_clicked": self.on_button_down_clicked,
"on_button_ok_clicked": self.on_button_ok_clicked,
"on_button_cancel_clicked": self.on_button_cancel_clicked,
"on_button_add_ok_clicked": self.on_button_add_ok_clicked,
"on_button_add_cancel_clicked": self.on_button_add_cancel_clicked
})
# Create a liststore for tier, url
self.liststore = Gtk.ListStore(int, str)
# Create the columns
self.treeview.append_column(
Gtk.TreeViewColumn(_("Tier"), Gtk.CellRendererText(), text=0))
self.treeview.append_column(
Gtk.TreeViewColumn(_("Tracker"), Gtk.CellRendererText(), text=1))
self.treeview.set_model(self.liststore)
self.liststore.set_sort_column_id(0, Gtk.SortType.ASCENDING)
def run(self):
# Make sure we have a torrent_id.. if not just return
        if self.torrent_id is None:
return
# Get the trackers for this torrent
session = component.get("SessionProxy")
session.get_torrent_status(self.torrent_id, ["trackers"]).addCallback(self._on_get_torrent_status)
client.force_call()
def _on_get_torrent_status(self, status):
"""Display trackers dialog"""
if 'trackers' in status:
for tracker in status["trackers"]:
self.add_tracker(tracker["tier"], tracker["url"])
self.dialog.show()
def add_tracker(self, tier, url):
"""Adds a tracker to the list"""
self.liststore.append([tier, url])
def get_selected(self):
"""Returns the selected tracker"""
return self.treeview.get_selection().get_selected()[1]
def on_button_add_clicked(self, widget):
log.debug("on_button_add_clicked")
# Show the add tracker dialog
self.add_tracker_dialog.show()
self.glade.get_object("textview_trackers").grab_focus()
def on_button_remove_clicked(self, widget):
log.debug("on_button_remove_clicked")
selected = self.get_selected()
        if selected is not None:
self.liststore.remove(selected)
def on_button_edit_clicked(self, widget):
"""edits an existing tracker"""
log.debug("on_button_edit_clicked")
selected = self.get_selected()
if selected:
tracker = self.liststore.get_value(selected, 1)
self.glade.get_object("entry_edit_tracker").set_text(tracker)
self.edit_tracker_entry.show()
self.glade.get_object("edit_tracker_entry").grab_focus()
def on_button_edit_cancel_clicked(self, widget):
log.debug("on_button_edit_cancel_clicked")
self.edit_tracker_entry.hide()
def on_button_edit_ok_clicked(self, widget):
log.debug("on_button_edit_ok_clicked")
selected = self.get_selected()
tracker = self.glade.get_object("entry_edit_tracker").get_text()
self.liststore.set_value(selected, 1, tracker)
self.edit_tracker_entry.hide()
def on_button_up_clicked(self, widget):
log.debug("on_button_up_clicked")
selected = self.get_selected()
num_rows = self.liststore.iter_n_children(None)
        if selected is not None and num_rows > 1:
tier = self.liststore.get_value(selected, 0)
if not tier > 0:
return
new_tier = tier - 1
# Now change the tier for this tracker
self.liststore.set_value(selected, 0, new_tier)
def on_button_down_clicked(self, widget):
log.debug("on_button_down_clicked")
selected = self.get_selected()
num_rows = self.liststore.iter_n_children(None)
        if selected is not None and num_rows > 1:
tier = self.liststore.get_value(selected, 0)
new_tier = tier + 1
# Now change the tier for this tracker
self.liststore.set_value(selected, 0, new_tier)
def on_button_ok_clicked(self, widget):
log.debug("on_button_ok_clicked")
self.trackers = []
def each(model, path, iter, data):
tracker = {}
tracker["tier"] = model.get_value(iter, 0)
tracker["url"] = model.get_value(iter, 1)
self.trackers.append(tracker)
self.liststore.foreach(each, None)
        # Set the torrent's trackers
client.core.set_torrent_trackers(self.torrent_id, self.trackers)
self.dialog.destroy()
def on_button_cancel_clicked(self, widget):
log.debug("on_button_cancel_clicked")
self.dialog.destroy()
def on_button_add_ok_clicked(self, widget):
log.debug("on_button_add_ok_clicked")
# Create a list of trackers from the textview widget
textview = self.glade.get_object("textview_trackers")
trackers = []
b = textview.get_buffer()
        lines = b.get_text(b.get_start_iter(), b.get_end_iter(), False).strip().split("\n")
for l in lines:
if deluge.common.is_url(l):
trackers.append(l)
for tracker in trackers:
# Figure out what tier number to use.. it's going to be the highest+1
# Also check for duplicates
# Check if there are any entries
duplicate = False
highest_tier = -1
for row in self.liststore:
tier = row[0]
if tier > highest_tier:
highest_tier = tier
if tracker == row[1]:
duplicate = True
break
# If not a duplicate, then add it to the list
if not duplicate:
# Add the tracker to the list
self.add_tracker(highest_tier + 1, tracker)
# Clear the entry widget and hide the dialog
textview.get_buffer().set_text("")
self.add_tracker_dialog.hide()
def on_button_add_cancel_clicked(self, widget):
log.debug("on_button_add_cancel_clicked")
# Clear the entry widget and hide the dialog
b = Gtk.TextBuffer()
self.glade.get_object("textview_trackers").set_buffer(b)
self.add_tracker_dialog.hide()
| gpl-3.0 | -8,834,094,978,799,285,000 | 38.640693 | 106 | 0.624331 | false |
fsxfreak/club-suite | clubsuite/suite/views/view_budget.py | 1 | 4194 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import View
from django.urls import reverse
from django.contrib import messages
from suite.models import Club, Division, Budget
from suite.forms import DivisionCreateForm, BudgetCreateForm
from guardian.shortcuts import get_perms
from django.core.exceptions import PermissionDenied
class Budget(UserPassesTestMixin, LoginRequiredMixin, View):
template_name = 'dashboard/budget.html'
division_form_class = DivisionCreateForm
budget_form_class = BudgetCreateForm
def test_func(self):
club = get_object_or_404(Club, pk=self.kwargs['club_id'])
if 'can_access_budget' not in get_perms(self.request.user, club):
raise PermissionDenied
return True
def generate_books(self, divs):
books = []
for div in divs:
budgets = div.budget_set.all()
total_budget = 0
for budget in budgets:
total_budget = total_budget + budget.planned
events = div.event_set.all()
total_expense = 0
for event in events:
total_expense = total_expense + event.event_cost
books.append({ 'division' : div, 'budgets' : budgets, 'events' : events,
'total_budget' : total_budget, 'total_expense' : total_expense })
return books
def get(self, request, club_id, *args, **kwargs):
club = Club.objects.get(pk=club_id)
budget_form = self.budget_form_class()
budget_form.fields['did'].queryset = Division.objects.filter(cid=club)
division_form = self.division_form_class
books = self.generate_books(club.division_set.all())
total_budget = 0
total_expense = 0
for book in books:
total_budget = total_budget + book['total_budget']
total_expense = total_expense + book['total_expense']
return render(request, self.template_name, { 'books': books,
'club': club,
'budget_form' : budget_form,
'division_form' : division_form,
'total_budget' : total_budget,
'total_expense' : total_expense})
def post(self, request, club_id, *args, **kwargs):
club = Club.objects.get(pk=club_id)
budget_form = self.budget_form_class()
budget_form.fields['did'].queryset = Division.objects.filter(cid=club)
division_form = self.division_form_class
if 'division' in request.POST:
division_form = self.division_form_class(request.POST)
if division_form.is_valid():
division = division_form.save()
division.cid = club
division.save()
        messages.add_message(request, messages.SUCCESS, 'You have created a new division!')
return HttpResponseRedirect(reverse('suite:budget', args=[club_id]))
else:
        messages.add_message(request, messages.WARNING, 'Cannot create a division with the same name.')
return HttpResponseRedirect(reverse('suite:budget', args=[club_id]))
elif 'budget' in request.POST:
budget_form = self.budget_form_class(request.POST)
if budget_form.is_valid():
budget = budget_form.save(commit=True)
budget.save()
else:
messages.add_message(request, messages.WARNING, 'Could not create budget.')
books = self.generate_books(club.division_set.all())
total_budget = 0
total_expense = 0
for book in books:
total_budget = total_budget + book['total_budget']
total_expense = total_expense + book['total_expense']
return render(request, self.template_name, { 'books' : books,
'club': club,
'budget_form' : budget_form,
'division_form' : division_form,
'total_budget' : total_budget,
'total_expense' : total_expense})
| mit | -6,044,569,809,767,433,000 | 38.196262 | 94 | 0.610157 | false |
regnart-tech-club/programming-concepts | Goals.py | 1 | 1519 | # Rationale:
# * Learn computational thinking
# * thought processes involved in formulating problems and their solutions
# so that the solutions are represented in a form
# that can be effectively carried out by an information-processing agent.
# * compositional reasoning, pattern matching, procedural thinking, recursive thinking
# * break down problems into smaller ones
# * abstract
# * power to scale and deal with complexity
# * consider failure conditions and scenarios
# * generalize solutions
# * form hypotheses
# * experiment
# * Be more productive, efficient, and effective
# * Computers are as much a part of today's life as is reading
#
# Introduce elementary school children to:
# * the concepts of programming (eg structural, object-oriented, functional)
# * good programming practices (eg TDD, DRY)
# * divide-and-conquer to solve problems
# * view problems in different ways (eg think of what can go wrong, break, etc)
# How do we achieve success?
# * Through having fun
# * the maximal level of performance for individuals in a given domain
# is not attained automatically as a function of extended experience,
# but the level of performance can be increased
# even by highly experienced individuals
# as a result of deliberate efforts to improve.
# * the most effective learning requires a well-defined task
# with an appropriate difficulty level for the particular individual,
# informative feedback, and opportunities for repetition and corrections of errors.
| apache-2.0 | 2,031,229,498,634,800,400 | 46.46875 | 88 | 0.760369 | false |
tarbell-project/tarbell | setup.py | 1 | 2186 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup, find_packages
from tarbell import __VERSION__ as VERSION
APP_NAME = 'tarbell'
settings = dict()
# Publish Helper.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
settings.update(
name=APP_NAME,
version=VERSION,
author=u'Tarbell Project',
author_email='[email protected]',
url='http://github.com/tarbell-project/tarbell',
license='MIT',
description='A very simple content management system',
long_description="""Read the docs at http://tarbell.readthedocs.org
Tarbell makes it simple to put your work on the web, whether you’re a team of one or a dozen. With Tarbell, you can collaboratively build beautiful websites and publish them with ease.
Tarbell makes use of familiar, flexible tools to take the magic (and frustration) out of publishing to the web. Google spreadsheets handle content management, so changes to your stories are easy to make without touching a line of code. Step-by-step prompts help you set up and configure your project, so that publishing it is a breeze.""",
zip_safe=False,
packages=find_packages(),
include_package_data=True,
install_requires=[
"Flask==0.10.1",
"Frozen-Flask==0.11",
"Jinja2==2.7.3",
"Markdown==2.4.1",
"MarkupSafe==0.23",
"PyYAML==3.11",
"boto==2.48.0",
"clint==0.4.1",
"gnureadline>=6.3.3",
"google-api-python-client==1.6.2",
"keyring==5.3",
"oauth2client==1.5.2",
"python-dateutil>=2.2",
"requests==2.3.0",
"sh==1.09",
"six>=1.10.0",
"xlrd==0.9.3",
],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
entry_points={
'console_scripts': [
'tarbell = tarbell.cli:main',
],
},
keywords=['Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet',
],
)
setup(**settings)
| bsd-3-clause | -7,955,737,072,833,608,000 | 30.2 | 339 | 0.619505 | false |
USGSDenverPychron/pychron | pychron/hardware/pychron_laser.py | 1 | 1127 | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pychron.hardware.fusions.fusions_logic_board import FusionsLogicBoard
# ============= standard library imports ========================
# ============= local library imports ==========================
class PychronLaser(FusionsLogicBoard):
pass
# ============= EOF =============================================
| apache-2.0 | -491,999,547,571,266,600 | 40.740741 | 81 | 0.539485 | false |
djmattyg007/bfinterpreter | bfinterpreter.py | 1 | 6325 | #!/usr/bin/python3
class Tape:
'''
A generic implementation of a record tape for a Turing Machine.
It's bounded on the left side and unbounded on the right side.
It stores only Python integers.
'''
def __init__(self):
self.reset()
def inc_val(self):
self.cells[self.pointer] += 1
def dec_val(self):
self.cells[self.pointer] -= 1
def move_right(self):
self.pointer += 1
if self.pointer == len(self.cells):
self.cells.append(0)
def move_left(self):
if self.pointer == 0:
raise Error("Cannot move past the start of the tape")
self.pointer -= 1
def get_val(self):
return self.cells[self.pointer]
def set_val(self, val):
self.cells[self.pointer] = val
def reset(self):
'''
Reset the tape to the same state it was in when it was
first initialised (ie. empty).
'''
self.cells = [0]
self.pointer = 0
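# A minimal usage sketch for Tape (illustrative only, not part of the
# interpreter itself):
#
#   tape = Tape()
#   tape.inc_val()           # cell 0 -> 1
#   tape.move_right()        # a new cell 1 is appended lazily
#   tape.set_val(65)
#   assert tape.get_val() == 65
#   tape.move_left()
#   assert tape.get_val() == 1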
class Brainfuck:
def __init__(self, tape, program, input_tape = None, allow_nested_loops = True, debug = False, eof_ord = 0):
self.tape = tape
self.program = program
self.input_tape = input_tape
self.pointer = 0
self.allow_nested_loops = allow_nested_loops
self.debug = debug
self.eof_ord = eof_ord
self.basic_ops = {
"+" : self.tape.inc_val,
"-" : self.tape.dec_val,
">" : self.tape.move_right,
"<" : self.tape.move_left,
}
def reset(self):
'''
Reset the interpreter to the same state it was in before
program execution commenced.
'''
self.tape.reset()
self.pointer = 0
if self.input_tape is not None:
self.input_tape.seek(0)
def read_input(self):
'''
Read a single character from the input tape supplied to
the interpreter.
'''
if self.input_tape is None:
return self.eof_ord
char = self.input_tape.read(1)
if char == "":
return self.eof_ord
else:
return ord(char)
def end_loop(self):
'''
    Call when the start of a loop is encountered with a zero cell value and
    nested loops are supported. Moves past the matching end-of-loop operator.
'''
nested_loop_count = 1
while nested_loop_count > 0:
self.pointer += 1
if self.program[self.pointer] == "]":
nested_loop_count -= 1
elif self.program[self.pointer] == "[":
nested_loop_count += 1
# Small optimisation: skip the end-of-loop operator
self.pointer += 1
def print_val(self):
'''
Print the unicode character represented by the byte value
stored at the current position on the recording tape.
'''
print(chr(self.tape.get_val()), end="")
def run_program(self):
    if self.debug:
import time
loop_pointers = []
program_length = len(self.program)
while self.pointer < program_length:
char = self.program[self.pointer]
      if self.debug:
debug_string = str(self.pointer) + "\t" + char + "\t"
if char in self.basic_ops.keys():
self.basic_ops[char]()
self.pointer += 1
elif char == ".":
self.print_val()
self.pointer += 1
elif char == "[":
if self.tape.get_val() == 0:
          if self.allow_nested_loops:
self.end_loop()
else:
self.pointer = self.program.index("]", self.pointer) + 1
else:
loop_pointers.append(self.pointer)
self.pointer += 1
elif char == "]":
loop_start = loop_pointers.pop()
if self.tape.get_val() == 0:
self.pointer += 1
else:
self.pointer = loop_start
elif char == ",":
charval = self.read_input()
self.tape.set_val(charval)
self.pointer += 1
else:
self.pointer += 1
      if self.debug:
debug_string += str(self.tape.pointer) + "\t" + str(self.tape.get_val())
if self.input_tape is not None:
debug_string += "\t" + str(self.input_tape.tell())
print("\n" + debug_string)
time.sleep(0.01)
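# A minimal usage sketch (illustrative only): a program of 65 '+' operations
# followed by '.' prints 'A' (ASCII 65).
#
#   tape = Tape()
#   bf = Brainfuck(tape, "+" * 65 + ".")
#   bf.run_program()          # prints: A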
if __name__ == "__main__":
import sys
def read_program_file(filename):
with open(filename, encoding="utf-8") as program_file:
return program_file.read()
def parse_bool(string):
'''
Turn a string representation of a boolean value into an actual
boolean-typed value.
'''
if string in ["true", "y", "yes", "1", "on"]:
return True
elif string in ["false", "n", "no", "0", "off"]:
return False
else:
return None
program = ""
input_tape = None
allow_nested_loops = True
debug = False
eof_ord = 0
dump_tape = False
args = sys.argv[1:]
for x, arg in enumerate(args):
if arg == "--program":
program = args[x + 1]
elif arg == "--program-file":
program = read_program_file(args[x + 1])
elif arg == "--input":
from io import StringIO
input_tape = StringIO(args[x + 1])
elif arg == "--input-file":
input_tape = open(args[x + 1], encoding="utf-8")
elif arg == "--nested-loops":
allow_nested_loops = parse_bool(args[x + 1])
elif arg == "--debug":
debug = parse_bool(args[x + 1])
elif arg == "--eof":
eof_ord = int(args[x + 1])
elif arg == "--dump-tape":
dump_tape = True
tape = Tape()
brainfuck = Brainfuck(tape, program, input_tape, allow_nested_loops, debug, eof_ord)
brainfuck.run_program()
  if dump_tape:
print("\n" + str(tape.cells))
# Cleanup
if input_tape is not None:
input_tape.close()
| unlicense | 1,415,792,457,444,116,500 | 28.694836 | 112 | 0.502609 | false |
eharney/cinder | cinder/cmd/volume_usage_audit.py | 1 | 10138 | #!/usr/bin/env python
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cron script to generate usage notifications for volumes existing during
the audit period.
Together with the notifications generated by volumes
create/delete/resize, over that time period, this allows an external
system consuming usage notification feeds to calculate volume usage
for each tenant.
Time periods are specified as 'hour', 'month', 'day' or 'year'
- `hour` - previous hour. If run at 9:07am, will generate usage for
8-9am.
- `month` - previous month. If the script is run April 1, it will
generate usages for March 1 through March 31.
- `day` - previous day. if run on July 4th, it generates usages for
July 3rd.
- `year` - previous year. If run on Jan 1, it generates usages for
Jan 1 through Dec 31 of the previous year.
"""
from __future__ import print_function
import datetime
import iso8601
import sys
from oslo_config import cfg
from oslo_log import log as logging
from cinder import i18n
i18n.enable_lazy()
from cinder import context
from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import utils
from cinder import version
import cinder.volume.utils
CONF = cfg.CONF
script_opts = [
cfg.StrOpt('start_time',
help="If this option is specified then the start time "
"specified is used instead of the start time of the "
"last completed audit period."),
cfg.StrOpt('end_time',
help="If this option is specified then the end time "
"specified is used instead of the end time of the "
"last completed audit period."),
cfg.BoolOpt('send_actions',
default=False,
help="Send the volume and snapshot create and delete "
"notifications generated in the specified period."),
]
CONF.register_cli_opts(script_opts)
def _time_error(LOG, begin, end):
if CONF.start_time:
begin = datetime.datetime.strptime(CONF.start_time,
"%Y-%m-%d %H:%M:%S")
if CONF.end_time:
end = datetime.datetime.strptime(CONF.end_time,
"%Y-%m-%d %H:%M:%S")
begin = begin.replace(tzinfo=iso8601.UTC)
end = end.replace(tzinfo=iso8601.UTC)
if not end > begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,
'end': end}
LOG.error(msg)
sys.exit(-1)
return begin, end
def _vol_notify_usage(LOG, volume_ref, extra_info, admin_context):
"""volume_ref notify usage"""
try:
LOG.debug("Send exists notification for <volume_id: "
"%(volume_id)s> <project_id %(project_id)s> "
"<%(extra_info)s>",
{'volume_id': volume_ref.id,
'project_id': volume_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_volume_usage(
admin_context, volume_ref, 'exists', extra_usage_info=extra_info)
except Exception as exc_msg:
LOG.error("Exists volume notification failed: %s",
exc_msg, resource=volume_ref)
def _snap_notify_usage(LOG, snapshot_ref, extra_info, admin_context):
"""snapshot_ref notify usage"""
try:
LOG.debug("Send notification for <snapshot_id: %(snapshot_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'snapshot_id': snapshot_ref.id,
'project_id': snapshot_ref.project_id,
'extra_info': extra_info})
cinder.volume.utils.notify_about_snapshot_usage(
admin_context, snapshot_ref, 'exists', extra_info)
except Exception as exc_msg:
LOG.error("Exists snapshot notification failed: %s",
exc_msg, resource=snapshot_ref)
def _backup_notify_usage(LOG, backup_ref, extra_info, admin_context):
"""backup_ref notify usage"""
try:
cinder.volume.utils.notify_about_backup_usage(
admin_context, backup_ref, 'exists', extra_info)
LOG.debug("Sent notification for <backup_id: %(backup_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'backup_id': backup_ref.id,
'project_id': backup_ref.project_id,
'extra_info': extra_info})
except Exception as exc_msg:
LOG.error("Exists backups notification failed: %s", exc_msg)
def _create_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.created_at),
'audit_period_ending': str(obj_ref.created_at),
}
LOG.debug("Send create notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'create.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'create.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Create %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _delete_action(obj_ref, admin_context, LOG, notify_about_usage,
type_id_str, type_name):
try:
local_extra_info = {
'audit_period_beginning': str(obj_ref.deleted_at),
'audit_period_ending': str(obj_ref.deleted_at),
}
LOG.debug("Send delete notification for <%(type_id_str)s: %(_id)s> "
"<project_id %(project_id)s> <%(extra_info)s>",
{'type_id_str': type_id_str,
'_id': obj_ref.id,
'project_id': obj_ref.project_id,
'extra_info': local_extra_info})
notify_about_usage(admin_context, obj_ref,
'delete.start', extra_usage_info=local_extra_info)
notify_about_usage(admin_context, obj_ref,
'delete.end', extra_usage_info=local_extra_info)
except Exception as exc_msg:
LOG.error("Delete %(type)s notification failed: %(exc_msg)s",
{'type': type_name, 'exc_msg': exc_msg}, resource=obj_ref)
def _obj_ref_action(_notify_usage, LOG, obj_ref, extra_info, admin_context,
begin, end, notify_about_usage, type_id_str, type_name):
_notify_usage(LOG, obj_ref, extra_info, admin_context)
if CONF.send_actions:
if begin < obj_ref.created_at < end:
_create_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
if obj_ref.deleted_at and begin < obj_ref.deleted_at < end:
_delete_action(obj_ref, admin_context, LOG,
notify_about_usage, type_id_str, type_name)
def main():
objects.register_all()
admin_context = context.get_admin_context()
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
LOG = logging.getLogger("cinder")
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
begin, end = _time_error(LOG, begin, end)
LOG.info("Starting volume usage audit")
LOG.info("Creating usages for %(begin_period)s until %(end_period)s",
{"begin_period": begin, "end_period": end})
extra_info = {
'audit_period_beginning': str(begin),
'audit_period_ending': str(end),
}
volumes = objects.VolumeList.get_all_active_by_window(admin_context,
begin,
end)
LOG.info("Found %d volumes", len(volumes))
for volume_ref in volumes:
_obj_ref_action(_vol_notify_usage, LOG, volume_ref, extra_info,
admin_context, begin, end,
cinder.volume.utils.notify_about_volume_usage,
"volume_id", "volume")
snapshots = objects.SnapshotList.get_all_active_by_window(admin_context,
begin, end)
LOG.info("Found %d snapshots", len(snapshots))
for snapshot_ref in snapshots:
_obj_ref_action(_snap_notify_usage, LOG, snapshot_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_snapshot_usage,
"snapshot_id", "snapshot")
backups = objects.BackupList.get_all_active_by_window(admin_context,
begin, end)
LOG.info("Found %d backups", len(backups))
for backup_ref in backups:
_obj_ref_action(_backup_notify_usage, LOG, backup_ref, extra_info,
admin_context, begin,
end, cinder.volume.utils.notify_about_backup_usage,
"backup_id", "backup")
LOG.info("Volume usage audit completed")
| apache-2.0 | -8,139,628,104,453,433,000 | 40.044534 | 78 | 0.577037 | false |
Davideddu/python-liquidcrystal | setup.py | 1 | 1093 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname), "r") as f:
return f.read()
setup(
name="liquidcrystal",
version="0.1",
author="Davide Depau",
author_email="[email protected]",
description="A Python port of Arduino's LiquidCrystal library that uses PyWiring to access an HD44780-based LCD "
"display through any supported I/O port.",
license="GPLv2",
keywords="lcd pywiring i2c gpio parallel serial liquidcrystal display",
url="http://github.com/Davidedd/python-liquidcrystal",
packages=['liquidcrystal'],
long_description=read('README.md'),
requires=["pywiring", "numpy"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
"Programming Language :: Python",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
],
)
| gpl-2.0 | -760,723,976,487,144,700 | 31.147059 | 117 | 0.650503 | false |
kkleidal/kentf | config.py | 1 | 8940 | #!/usr/bin/env python3
import sys
import argparse
import os
import yaml
import json
from attrdict import AttrDict
class ConfigKWArg:
def __init__(self, name, example, type=str, default=None, required=False, short_name=None, nargs=1, help=None):
self.name = name
self.type = type
self.default = default
if nargs != 1:
self.default = []
self.required = required
self.short_name = short_name
self.nargs = nargs
self.help = help
self.example = example
def add_argument_to(self, parser):
args = []
kwargs = {}
if self.short_name is not None:
args.append("-%s" % self.short_name)
args.append("--%s" % self.name)
kwargs["dest"] = self.name
if isinstance(self.type, str) and self.type.startswith("file"):
kwargs["type"] = str
else:
kwargs["type"] = self.type
if self.nargs != 1:
kwargs["nargs"] = self.nargs
if self.help is not None:
kwargs["help"] = self.help
kwargs["default"] = None
parser.add_argument(*args, **kwargs)
def get_example(self):
if self.nargs != 1 and not isinstance(self.example, list):
return [self.example]
else:
return self.example
def update_config(self, config, config_from_file):
if config.get(self.name, None) is None:
config[self.name] = config_from_file.get(self.name, None)
elif self.nargs != 1:
config[self.name] = config.get(self.name, []) + config_from_file.get(self.name, [])
def postprocess_config(self, config):
if config.get(self.name, None) is None:
if self.required:
raise MissingArgumentError(self.name)
config[self.name] = self.default
return
if isinstance(self.type, str) and self.type.startswith("file"):
if self.type == "filew" or self.type == "filer":
filenames = resolve_filenames(config[self.name], mode=self.type[-1])
if len(filenames) > 1:
raise MultipleFilesError()
if len(filenames) == 0:
raise NoSuchFileError()
                config[self.name] = filenames[0]
elif self.type == "filesw" or self.type == "filesr":
if isinstance(config[self.name], str):
filenames = resolve_filenames(config[self.name], mode=self.type[-1])
else:
filenames = []
for name in config[self.name]:
filenames.extend(resolve_filenames(name, mode=self.type[-1]))
config[self.name] = filenames
class ConfigHelper:
def __init__(self, desc):
self.desc = desc
self.args = []
self.reserved_long = {'help', 'yaml-config', 'json-config', 'yaml-stub', 'json-stub', 'save-json-config', 'save-yaml-config'}
self.reserved_short = {'help', 'yc', 'jc'}
self.args_long_hash = {}
self.args_short_hash = {}
def add_argument(self, arg):
self.args.append(arg)
if arg.name in self.reserved_long:
raise DuplicateArgumentError(arg.name)
else:
self.args_long_hash[arg.name] = arg
self.reserved_long.add(arg.name)
if arg.short_name is not None:
if arg.short_name in self.reserved_short:
raise DuplicateArgumentError(arg.short_name)
else:
self.args_short_hash[arg.short_name] = arg
self.reserved_short.add(arg.short_name)
def _make_stub(self, fileobj, dump, config=None):
obj = {arg.name: arg.get_example() for arg in self.args}
if config is not None:
obj = {arg.name: config[arg.name] for arg in self.args if arg.name in config}
dump(obj, fileobj)
def make_yaml_stub(self, fileobj, config=None):
self._make_stub(fileobj, lambda x, f: yaml.dump(x, f, indent=2, default_flow_style=False), config)
def make_json_stub(self, fileobj, config=None):
self._make_stub(fileobj, lambda x, f: json.dump(x, f, indent=2, separators=(',', ': '), sort_keys=True), config)
def parse_args(self):
parser = argparse.ArgumentParser(self.desc)
for arg in self.args:
arg.add_argument_to(parser)
parser.add_argument('-yc', '--yaml-config', dest="yaml_config", type=str, default=None,
help="YAML configuration file to load to specify default args")
parser.add_argument('-jc', '--json-config', dest="json_config", type=str, default=None,
help="JSON configuration file to load to specify default args")
parser.add_argument('--save-yaml-config', dest="save_yaml_config", type=str, default=None,
help="Save configuration to this YAML file")
parser.add_argument('--save-json-config', dest="save_json_config", type=str, default=None,
help="Save configuration to this JSON file")
parser.add_argument('--yaml-stub', dest="yaml_stub", action='store_true',
help="Make stub YAML config file with example arguments")
parser.add_argument('--json-stub', dest="json_stub", action='store_true',
help="Make stub JSON config file with example arguments")
config = AttrDict(vars(parser.parse_args()))
if config.yaml_stub:
with FileOpener(sys.stdout, "w") as f:
self.make_yaml_stub(f)
sys.exit(0)
if config.json_stub:
with FileOpener(sys.stdout, "w") as f:
self.make_json_stub(f)
sys.exit(0)
if config.yaml_config is not None or config.json_config is not None:
if config.yaml_config is not None and config.json_config is not None:
raise RuntimeError("YAML config and JSON config files specified. You can only choose 1.")
elif config.yaml_config is not None:
with FileOpener(config.yaml_config, "r") as f:
                    config_file = yaml.safe_load(f)
elif config.json_config is not None:
with FileOpener(config.json_config, "r") as f:
config_file = json.load(f)
for arg in self.args:
arg.update_config(config, config_file)
for arg in self.args:
arg.postprocess_config(config)
if config.save_yaml_config:
with FileOpener(config.save_yaml_config, "w") as f:
self.make_yaml_stub(f, config=config)
if config.save_json_config:
with FileOpener(config.save_json_config, "w") as f:
self.make_json_stub(f, config=config)
del config["yaml_config"]
del config["json_config"]
del config["save_yaml_config"]
del config["save_json_config"]
del config["yaml_stub"]
del config["json_stub"]
return config
def resolve_filenames(filename, mode="w"):
if filename == "-":
if len(mode) == 0 or mode[0] == "r":
return ["/dev/stdin"]
else:
return ["/dev/stdout"]
elif filename == "&0":
return ["/dev/stdin"]
elif filename == "&1":
return ["/dev/stdout"]
elif filename == "&2":
return ["/dev/stderr"]
else:
return [filename]
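# For example, resolve_filenames("-", mode="r") yields ["/dev/stdin"] and
# resolve_filenames("&2") yields ["/dev/stderr"], while an ordinary path such
# as "out.txt" is returned unchanged as ["out.txt"].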
class NoSuchFileError(RuntimeError):
pass
class MultipleFilesError(RuntimeError):
pass
class ArgumentError(RuntimeError):
def __init__(self, arg, message=None):
super(ArgumentError, self).__init__(message or "Argument '%s' was invalid." % arg)
self.argument = arg
class MissingArgumentError(ArgumentError):
def __init__(self, arg):
super(MissingArgumentError, self).__init__(arg, "Argument '%s' was missing." % arg)
class DuplicateArgumentError(ArgumentError):
def __init__(self, arg):
super(DuplicateArgumentError, self).__init__(arg, "Argument '%s' occurred more than once." % arg)
class FileOpener:
def __init__(self, fileobj, mode='r'):
self.closeobj = False
if isinstance(fileobj, str):
filenames = resolve_filenames(fileobj, mode=mode)
if len(filenames) == 0:
raise NoSuchFileError()
elif len(filenames) > 1:
raise MultipleFilesError()
self.fileobj = open(filenames[0], mode)
self.closeobj = True
else:
self.fileobj = fileobj
def write(self, *args, **kwargs):
self.fileobj.write(*args, **kwargs)
def read(self, *args, **kwargs):
return self.fileobj.read(*args, **kwargs)
def close(self):
if self.closeobj:
self.fileobj.close()
def __enter__(self):
return self
    def __exit__(self, type, value, traceback):
if self.closeobj:
self.close()
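# Minimal usage sketch (hypothetical script and option name, not part of this
# module): register a single keyword argument, then let ConfigHelper merge the
# command line with an optional YAML/JSON config file.
if __name__ == "__main__":
    helper = ConfigHelper("Example tool")
    helper.add_argument(ConfigKWArg("learning_rate", 0.01, type=float,
                                    default=0.001, short_name="lr",
                                    help="optimizer learning rate"))
    cfg = helper.parse_args()
    # prints the command-line value, the config-file value or the default
    print(cfg["learning_rate"])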
| mit | 6,601,954,506,612,758,000 | 38.210526 | 133 | 0.576063 | false |
agrover/targetd | targetd/fs.py | 1 | 10302 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2012, Andy Grover <[email protected]>
# Copyright 2013, Tony Asleson <[email protected]>
#
# fs support using btrfs.
import os
import time
from targetd.nfs import Nfs, Export
from targetd.utils import invoke, TargetdError
# Notes:
#
# User can configure block pools (lvm volume groups) 1 to many or 0-many file
# system mount points to be used as pools. At this time you have to specify
# a block pool for block operations and file system mount point pool for FS
# operations. We could use files on a file system for block too and create
# file systems on top of lvm too, but that is TBD.
#
# We are using btrfs to provide all the cool fast FS features. User supplies a
# btrfs mount point and we create a targetd_fs and targetd_ss subvolumes. Each
# time the user creates a file system we are creating a subvolume under fs.
# Each time a FS clone is made we create the clone under fs. For each snapshot
# (RO clone) we are creating a read only snapshot in
# <mount>/targetd_ss/<fsname>/<snapshot name>
#
# There may be better ways of utilizing btrfs.
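#
# For illustration only - with a hypothetical pool mounted at /mnt/btrfs the
# layout described above looks like:
#
#   /mnt/btrfs/targetd_fs/<fsname>                   file systems (subvolumes)
#   /mnt/btrfs/targetd_fs/<clone name>               writable clones
#   /mnt/btrfs/targetd_ss/<fsname>/<snapshot name>   read-only snapshots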
import logging as log
fs_path = "targetd_fs"
ss_path = "targetd_ss"
fs_cmd = 'btrfs'
pools = []
def initialize(config_dict):
global pools
pools = config_dict['fs_pools']
for pool in pools:
# Make sure we have the appropriate subvolumes available
try:
create_sub_volume(os.path.join(pool, fs_path))
create_sub_volume(os.path.join(pool, ss_path))
except TargetdError as e:
log.error('Unable to create required subvolumes {0}'.format(e))
raise
return dict(
fs_list=fs,
fs_destroy=fs_destroy,
fs_create=fs_create,
fs_clone=fs_clone,
ss_list=ss,
fs_snapshot=fs_snapshot,
fs_snapshot_delete=fs_snapshot_delete,
nfs_export_auth_list=nfs_export_auth_list,
nfs_export_list=nfs_export_list,
nfs_export_add=nfs_export_add,
nfs_export_remove=nfs_export_remove,
)
def create_sub_volume(p):
if not os.path.exists(p):
invoke([fs_cmd, 'subvolume', 'create', p])
def split_stdout(out):
"""
Split the text out as an array of text arrays.
"""
strip_it = '<FS_TREE>/'
rc = []
for line in out.split('\n'):
elem = line.split(' ')
if len(elem) > 1:
tmp = []
for z in elem:
if z.startswith(strip_it):
tmp.append(z[len(strip_it):])
else:
tmp.append(z)
rc.append(tmp)
return rc
def fs_space_values(mount_point):
"""
Return a tuple (total, free) from the specified path
"""
st = os.statvfs(mount_point)
free = (st.f_bavail * st.f_frsize)
total = (st.f_blocks * st.f_frsize)
return total, free
def pool_check(pool_name):
"""
pool_name *cannot* be trusted, funcs taking a pool param must call
this or to ensure passed-in pool name is one targetd has
been configured to use.
"""
if pool_name not in pools:
raise TargetdError(-110, "Invalid filesystem pool")
def fs_create(req, pool_name, name, size_bytes):
pool_check(pool_name)
full_path = os.path.join(pool_name, fs_path, name)
if not os.path.exists(full_path):
invoke([fs_cmd, 'subvolume', 'create', full_path])
else:
raise TargetdError(-53, 'FS already exists')
def fs_snapshot(req, fs_uuid, dest_ss_name):
fs_ht = _get_fs_by_uuid(req, fs_uuid)
if fs_ht:
source_path = os.path.join(fs_ht['pool'], fs_path, fs_ht['name'])
dest_base = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'])
dest_path = os.path.join(dest_base, dest_ss_name)
create_sub_volume(dest_base)
if os.path.exists(dest_path):
raise TargetdError(-53, "Snapshot already exists with that name")
invoke([fs_cmd, 'subvolume', 'snapshot', '-r', source_path, dest_path])
def fs_snapshot_delete(req, fs_uuid, ss_uuid):
fs_ht = _get_fs_by_uuid(req, fs_uuid)
snapshot = _get_ss_by_uuid(req, fs_uuid, ss_uuid, fs_ht)
path = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'],
snapshot['name'])
fs_subvolume_delete(path)
def fs_subvolume_delete(path):
invoke([fs_cmd, 'subvolume', 'delete', path])
def fs_destroy(req, uuid):
# Check to see if this file system has any read-only snapshots, if yes then
# delete. The API requires a FS to list its RO copies, we may want to
# reconsider this decision.
fs_ht = _get_fs_by_uuid(req, uuid)
base_snapshot_dir = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'])
snapshots = ss(req, uuid)
for s in snapshots:
fs_subvolume_delete(os.path.join(base_snapshot_dir, s['name']))
if os.path.exists(base_snapshot_dir):
fs_subvolume_delete(base_snapshot_dir)
fs_subvolume_delete(os.path.join(fs_ht['pool'], fs_path, fs_ht['name']))
def fs_pools(req):
results = []
for pool in pools:
total, free = fs_space_values(pool)
results.append(dict(name=pool, size=total, free_size=free, type='fs'))
return results
def _invoke_retries(command, throw_exception):
# TODO take out this loop, used to handle bug in btrfs
# ERROR: Failed to lookup path for root 0 - No such file or directory
for i in range(0, 5):
result, out, err = invoke(command, False)
if result == 0:
return result, out, err
elif result == 19:
time.sleep(1)
continue
else:
raise TargetdError(-303, "Unexpected exit code %d" % result)
raise TargetdError(-303, "Unable to execute command after "
"multiple retries %s" % (str(command)))
def _fs_hash():
fs_list = {}
for pool in pools:
full_path = os.path.join(pool, fs_path)
result, out, err = _invoke_retries(
[fs_cmd, 'subvolume', 'list', '-ua', pool], False)
data = split_stdout(out)
if len(data):
(total, free) = fs_space_values(full_path)
for e in data:
sub_vol = e[10]
prefix = fs_path + os.path.sep
if sub_vol[:len(prefix)] == prefix:
key = os.path.join(pool, sub_vol)
fs_list[key] = dict(name=sub_vol[len(prefix):],
uuid=e[8],
total_space=total,
free_space=free,
pool=pool,
full_path=key)
return fs_list
def fs(req):
return list(_fs_hash().values())
def ss(req, fs_uuid, fs_cache=None):
snapshots = []
if fs_cache is None:
fs_cache = _get_fs_by_uuid(req, fs_uuid)
full_path = os.path.join(fs_cache['pool'], ss_path, fs_cache['name'])
if os.path.exists(full_path):
result, out, err = _invoke_retries([fs_cmd, 'subvolume', 'list', '-s',
full_path], False)
data = split_stdout(out)
if len(data):
for e in data:
ts = "%s %s" % (e[10], e[11])
time_epoch = int(time.mktime(
time.strptime(ts, '%Y-%m-%d %H:%M:%S')))
st = dict(name=e[-1], uuid=e[-3], timestamp=time_epoch)
snapshots.append(st)
return snapshots
def _get_fs_by_uuid(req, fs_uuid):
for f in fs(req):
if f['uuid'] == fs_uuid:
return f
def _get_ss_by_uuid(req, fs_uuid, ss_uuid, fs_ht=None):
if fs_ht is None:
fs_ht = _get_fs_by_uuid(req, fs_uuid)
for s in ss(req, fs_uuid, fs_ht):
if s['uuid'] == ss_uuid:
return s
def fs_clone(req, fs_uuid, dest_fs_name, snapshot_id):
fs_ht = _get_fs_by_uuid(req, fs_uuid)
if not fs_ht:
raise TargetdError(-104, "fs_uuid not found")
if snapshot_id:
snapshot = _get_ss_by_uuid(req, fs_uuid, snapshot_id)
if not snapshot:
raise TargetdError(-112, "snapshot not found")
source = os.path.join(fs_ht['pool'], ss_path, fs_ht['name'],
snapshot['name'])
dest = os.path.join(fs_ht['pool'], fs_path, dest_fs_name)
else:
source = os.path.join(fs_ht['pool'], fs_path, fs_ht['name'])
dest = os.path.join(fs_ht['pool'], fs_path, dest_fs_name)
if os.path.exists(dest):
raise TargetdError(-51, "Filesystem with that name exists")
invoke([fs_cmd, 'subvolume', 'snapshot', source, dest])
def nfs_export_auth_list(req):
return Nfs.security_options()
def nfs_export_list(req):
rc = []
exports = Nfs.exports()
for e in exports:
rc.append(dict(host=e.host, path=e.path, options=e.options_list()))
return rc
def nfs_export_add(req, host, path, export_path, options):
if export_path is not None:
raise TargetdError(-401, "separate export path not supported at "
"this time")
bit_opt = 0
key_opt = {}
for o in options:
if '=' in o:
k, v = o.split('=')
key_opt[k] = v
else:
bit_opt |= Export.bool_option[o]
Nfs.export_add(host, path, bit_opt, key_opt)
def nfs_export_remove(req, host, path):
found = False
for e in Nfs.exports():
if e.host == host and e.path == path:
Nfs.export_remove(e)
found = True
if not found:
        raise TargetdError(
            -400, "NFS export to remove not found %s:%s" % (host, path))
| gpl-3.0 | -551,828,838,198,015,300 | 28.603448 | 79 | 0.584158 | false |
philipkershaw/ndg_security_server | ndg/security/server/wsgi/authz/pep.py | 1 | 22782 | '''NDG Security Policy Enforcement Point Module
__author__ = "P J Kershaw"
__date__ = "11/07/10"
__copyright__ = "(C) 2010 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
'''
import logging
log = logging.getLogger(__name__)
import re
import httplib
from urllib2 import URLError
from time import time
import webob
from ndg.soap.client import UrlLib2SOAPClientError
from ndg.saml.saml2.core import DecisionType
from ndg.saml.saml2.binding.soap.client.requestbase import \
RequestBaseSOAPBinding
from ndg.saml.saml2.binding.soap.client.authzdecisionquery import \
AuthzDecisionQuerySslSOAPBinding
from ndg.xacml.core import Identifiers as XacmlIdentifiers
from ndg.xacml.core import context as _xacmlCtx
from ndg.xacml.core.attribute import Attribute as XacmlAttribute
from ndg.xacml.core.attributevalue import (
AttributeValueClassFactory as XacmlAttributeValueClassFactory,
AttributeValue as XacmlAttributeValue)
from ndg.xacml.core.context.result import Decision as XacmlDecision
from ndg.xacml.core.context.pdp import PDP
from ndg.xacml.parsers.etree.factory import (
ReaderFactory as XacmlPolicyReaderFactory)
from ndg.security.server.wsgi.session import (SessionMiddlewareBase,
SessionHandlerMiddleware)
from ndg.security.common.credentialwallet import SAMLAssertionWallet
from ndg.security.common.utils import str2Bool
class SamlPepFilterConfigError(Exception):
"""Error with SAML PEP configuration settings"""
class SamlPepFilterBase(SessionMiddlewareBase):
'''Policy Enforcement Point for ESG with SAML based Interface
:requires: ndg.security.server.wsgi.session.SessionHandlerMiddleware
instance upstream in the WSGI stack.
:cvar AUTHZ_DECISION_QUERY_PARAMS_PREFIX: prefix for SAML authorisation
decision query options in config file
:type AUTHZ_DECISION_QUERY_PARAMS_PREFIX: string
:cvar PARAM_NAMES: list of config option names
:type PARAM_NAMES: tuple
:ivar __client: SAML authorisation decision query client
:type __client: ndg.saml.saml2.binding.soap.client.authzdecisionquery.AuthzDecisionQuerySslSOAPBinding
:ivar ignore_file_list_pat: a list of regular expressions for resource paths
ignored by the authorisation policy. Resources matching these patterns
circumvent the authorisation policy. This setting needs to be made
carefully!
:type ignore_file_list_pat: list
'''
AUTHZ_SERVICE_URI = 'authzServiceURI'
AUTHZ_DECISION_QUERY_PARAMS_PREFIX = 'authzDecisionQuery.'
SESSION_KEY_PARAM_NAME = 'sessionKey'
CACHE_DECISIONS_PARAM_NAME = 'cacheDecisions'
LOCAL_POLICY_FILEPATH_PARAM_NAME = 'localPolicyFilePath'
CREDENTIAL_WALLET_SESSION_KEYNAME = \
SessionHandlerMiddleware.CREDENTIAL_WALLET_SESSION_KEYNAME
USERNAME_SESSION_KEYNAME = \
SessionHandlerMiddleware.USERNAME_SESSION_KEYNAME
PARAM_NAMES = (
AUTHZ_SERVICE_URI,
SESSION_KEY_PARAM_NAME,
CACHE_DECISIONS_PARAM_NAME,
LOCAL_POLICY_FILEPATH_PARAM_NAME
)
XACML_ATTRIBUTEVALUE_CLASS_FACTORY = XacmlAttributeValueClassFactory()
    __slots__ = (
        '_app', '__client', '__session', '__localPdp', 'ignore_file_list_pat'
    ) + tuple(('__' + '$__'.join(PARAM_NAMES)).split('$'))
def __init__(self, app):
'''
Add reference to next WSGI middleware/app and create a SAML
authorisation decision query client interface
'''
self._app = app
self.__client = AuthzDecisionQuerySslSOAPBinding()
self.__session = None
self.__authzServiceURI = None
self.__sessionKey = None
self.__cacheDecisions = False
self.__localPdp = None
self.__localPolicyFilePath = None
self.ignore_file_list_pat = None
def _getLocalPolicyFilePath(self):
return self.__localPolicyFilePath
def _setLocalPolicyFilePath(self, value):
if not isinstance(value, basestring):
raise TypeError('Expecting string type for "localPolicyFilePath" '
'attribute; got %r' % type(value))
self.__localPolicyFilePath = value
localPolicyFilePath = property(_getLocalPolicyFilePath,
_setLocalPolicyFilePath,
doc="Policy file path for local PDP. It's "
"initialised to None in which case the "
"local PDP is disabled and all access "
"control queries will be routed through "
"to the authorisation service")
def _getLocalPdp(self):
return self.__localPdp
def _setLocalPdp(self, value):
self.__localPdp = value
localPdp = property(_getLocalPdp, _setLocalPdp,
doc="File path for a local PDP which can be used to "
"filters requests from the authorisation service "
"so avoiding the web service call performance "
"penalty")
def _getClient(self):
return self.__client
def _setClient(self, value):
if not isinstance(value, RequestBaseSOAPBinding):
raise TypeError('Expecting type %r for "client" attribute; '
'got %r' %
(type(RequestBaseSOAPBinding), type(value)))
self.__client = value
client = property(_getClient, _setClient,
doc="SAML authorisation decision query SOAP client")
def _getSession(self):
return self.__session
def _setSession(self, value):
self.__session = value
session = property(_getSession, _setSession,
doc="Beaker Security Session instance")
def _getAuthzServiceURI(self):
return self.__authzServiceURI
def _setAuthzServiceURI(self, value):
if not isinstance(value, basestring):
raise TypeError('Expecting string type for "authzServiceURI" '
'attribute; got %r' % type(value))
self.__authzServiceURI = value
authzServiceURI = property(_getAuthzServiceURI, _setAuthzServiceURI,
doc="Authorisation Service URI")
def _getSessionKey(self):
return self.__sessionKey
def _setSessionKey(self, value):
if not isinstance(value, basestring):
raise TypeError('Expecting string type for "sessionKey" attribute; '
'got %r' % type(value))
self.__sessionKey = value
sessionKey = property(_getSessionKey, _setSessionKey,
doc="environ key name for Beaker session object")
def _getCacheDecisions(self):
return self.__cacheDecisions
def _setCacheDecisions(self, value):
if isinstance(value, basestring):
self.__cacheDecisions = str2Bool(value)
elif isinstance(value, bool):
self.__cacheDecisions = value
else:
raise TypeError('Expecting bool/string type for "cacheDecisions" '
'attribute; got %r' % type(value))
cacheDecisions = property(_getCacheDecisions, _setCacheDecisions,
doc="Set to True to make the session cache "
"authorisation decisions returned from the "
"Authorisation Service")
def initialise(self, prefix='', **kw):
'''Initialise object from keyword settings
:type prefix: basestring
:param prefix: prefix for configuration items
:type kw: dict
:param kw: configuration settings
dictionary
:raise SamlPepFilterConfigError: missing option setting(s)
'''
# Parse other options
for name in SamlPepFilter.PARAM_NAMES:
paramName = prefix + name
value = kw.get(paramName)
if value is not None:
setattr(self, name, value)
elif name != self.__class__.LOCAL_POLICY_FILEPATH_PARAM_NAME:
# Policy file setting is optional
raise SamlPepFilterConfigError('Missing option %r' % paramName)
# Parse authorisation decision query options
queryPrefix = prefix + self.__class__.AUTHZ_DECISION_QUERY_PARAMS_PREFIX
self.client.parseKeywords(prefix=queryPrefix, **kw)
# Initialise the local PDP
if self.localPolicyFilePath:
self.__localPdp = PDP.fromPolicySource(self.localPolicyFilePath,
XacmlPolicyReaderFactory)
@classmethod
def filter_app_factory(cls, app, global_conf, prefix='', **app_conf):
"""Set-up using a Paste app factory pattern.
:type app: callable following WSGI interface
:param app: next middleware application in the chain
:type global_conf: dict
:param global_conf: PasteDeploy global configuration dictionary
:type prefix: basestring
:param prefix: prefix for configuration items
:type app_conf: dict
:param app_conf: PasteDeploy application specific configuration
dictionary
"""
app = cls(app)
app.initialise(prefix=prefix, **app_conf)
return app
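    # Illustrative PasteDeploy configuration for this factory (values are
    # hypothetical; the option names map onto PARAM_NAMES and anything under
    # the "authzDecisionQuery." prefix is passed to the SAML query binding):
    #
    #   [filter:authz_pep]
    #   paste.filter_app_factory = ndg.security.server.wsgi.authz.pep:SamlPepFilter.filter_app_factory
    #   prefix = authz.
    #   authz.authzServiceURI = https://authz.example.org/AuthorisationService
    #   authz.sessionKey = beaker.session.ndg.security
    #   authz.cacheDecisions = True
    #   authz.localPolicyFilePath = %(here)s/pep-policy.xml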
def __call__(self, environ, start_response):
"""Intercept request and call authorisation service to make an access
control decision
:type environ: dict
:param environ: WSGI environment variables dictionary
:type start_response: function
:param start_response: standard WSGI start response function
:rtype: iterable
:return: response
"""
# Get reference to session object - SessionHandler middleware must be in
# place upstream of this middleware in the WSGI stack
if self.sessionKey not in environ:
raise SamlPepFilterConfigError('No beaker session key "%s" found '
'in environ' % self.sessionKey)
self.session = environ[self.sessionKey]
return self.enforce(environ, start_response)
def enforce(self, environ, start_response):
"""Get access control decision from PDP(s) and enforce the decision
:type environ: dict
:param environ: WSGI environment variables dictionary
:type start_response: function
:param start_response: standard WSGI start response function
:rtype: iterable
:return: response
"""
raise NotImplementedError("SamlPepFilterBase must be subclassed to"
" implement the enforce method.")
def _retrieveCachedAssertions(self, resourceId):
"""Return assertions containing authorisation decision for the given
resource ID.
:param resourceId: search for decisions for this resource Id
:type resourceId: basestring
:return: assertion containing authorisation decision for the given
resource ID or None if no wallet has been set or no assertion was
found matching the input resource Id
:rtype: ndg.saml.saml2.core.Assertion / None type
"""
# Get reference to wallet
walletKeyName = self.__class__.CREDENTIAL_WALLET_SESSION_KEYNAME
credWallet = self.session.get(walletKeyName)
if credWallet is None:
return None
# Wallet has a dictionary of credential objects keyed by resource ID
return credWallet.retrieveCredentials(resourceId)
def _cacheAssertions(self, resourceId, assertions):
"""Cache an authorisation decision from a response retrieved from the
authorisation service. This is invoked only if cacheDecisions boolean
is set to True
:param resourceId: search for decisions for this resource Id
:type resourceId: basestring
:param assertions: list of SAML assertions containing authorisation
decision statements
:type assertions: iterable
"""
walletKeyName = self.__class__.CREDENTIAL_WALLET_SESSION_KEYNAME
credWallet = self.session.get(walletKeyName)
if credWallet is None:
credWallet = SAMLAssertionWallet()
# Fix: make wallet follow the same clock skew tolerance and as the
# SAML authz decision query settings
credWallet.clockSkewTolerance = self.client.clockSkewTolerance
credWallet.addCredentials(resourceId, assertions)
self.session[walletKeyName] = credWallet
self.session.save()
def saveResultCtx(self, request, response, save=True):
"""Set PEP context information in the Beaker session using standard key
names. This is a snapshot of the last request and the response
received. It can be used by downstream middleware to provide contextual
information about authorisation decisions
:param session: beaker session
:type session: beaker.session.SessionObject
:param request: authorisation decision query
:type request: ndg.saml.saml2.core.AuthzDecisionQuery
:param response: authorisation response
:type response: ndg.saml.saml2.core.Response
:param save: determines whether session is saved or not
:type save: bool
"""
self.session[self.__class__.PEPCTX_SESSION_KEYNAME] = {
self.__class__.PEPCTX_REQUEST_SESSION_KEYNAME: request,
self.__class__.PEPCTX_RESPONSE_SESSION_KEYNAME: response,
self.__class__.PEPCTX_TIMESTAMP_SESSION_KEYNAME: time()
}
if save:
self.session.save()
PDP_DENY_RESPONSES = (
XacmlDecision.DENY_STR, XacmlDecision.INDETERMINATE_STR
)
def isApplicableRequest(self, resourceURI):
"""A local PDP can filter out some requests to avoid the need to call
out to the authorisation service
:param resourceURI: URI of requested resource
:type resourceURI: basestring
"""
# Apply a list of regular expressions to filter out files which can be
# ignored
        if self.ignore_file_list_pat is not None:
            for pat in self.ignore_file_list_pat:
                if re.match(pat, resourceURI):
                    return False
        # Nothing in the ignore list matched - consult the local PDP if one is
        # configured, otherwise defer to the main authorisation service
        if self.__localPdp is None:
            log.debug("No Local PDP set: passing on request to main "
                      "authorisation service...")
            return True
        xacmlRequest = self._createXacmlRequestCtx(resourceURI)
        xacmlResponse = self.__localPdp.evaluate(xacmlRequest)
        for result in xacmlResponse.results:
            if result.decision.value != XacmlDecision.NOT_APPLICABLE_STR:
                log.debug("Local PDP returned %s decision, passing request "
                          "on to main authorisation service ...",
                          result.decision.value)
                return True
        return False
def _createXacmlRequestCtx(self, resourceURI):
"""Wrapper to create a request context for a local PDP - see
isApplicableRequest
:param resourceURI: URI of requested resource
:type resourceURI: basestring
"""
request = _xacmlCtx.request.Request()
resource = _xacmlCtx.request.Resource()
resourceAttribute = XacmlAttribute()
resource.attributes.append(resourceAttribute)
resourceAttribute.attributeId = XacmlIdentifiers.Resource.RESOURCE_ID
XacmlAnyUriAttributeValue = \
self.__class__.XACML_ATTRIBUTEVALUE_CLASS_FACTORY(
XacmlAttributeValue.ANY_TYPE_URI)
resourceAttribute.dataType = XacmlAnyUriAttributeValue.IDENTIFIER
resourceAttribute.attributeValues.append(XacmlAnyUriAttributeValue())
resourceAttribute.attributeValues[-1].value = resourceURI
request.resources.append(resource)
return request
class SamlPepFilter(SamlPepFilterBase):
def enforce(self, environ, start_response):
"""Get access control decision from PDP(s) and enforce the decision
:type environ: dict
:param environ: WSGI environment variables dictionary
:type start_response: function
:param start_response: standard WSGI start response function
:rtype: iterable
:return: response
"""
request = webob.Request(environ)
requestURI = request.url
# Nb. user may not be logged in hence REMOTE_USER is not set
remoteUser = request.remote_user or ''
# Apply local PDP if set
if not self.isApplicableRequest(requestURI):
# The local PDP has returned a decision that the requested URI is
# not applicable and so the authorisation service need not be
# invoked. This step is an efficiency measure to avoid multiple
# callouts to the authorisation service for resources which
# obviously don't need any restrictions
return self._app(environ, start_response)
# Check for cached decision
if self.cacheDecisions:
assertions = self._retrieveCachedAssertions(requestURI)
else:
assertions = None
noCachedAssertion = assertions is None or len(assertions) == 0
if noCachedAssertion:
# No stored decision in cache, invoke the authorisation service
query = self.client.makeQuery()
query.resource = request.url
self.client.setQuerySubjectId(query, remoteUser)
try:
samlAuthzResponse = self.client.send(query,
uri=self.authzServiceURI)
except (UrlLib2SOAPClientError, URLError) as e:
import traceback
if isinstance(e, UrlLib2SOAPClientError):
log.error("Error, HTTP %s response from authorisation "
"service %r requesting access to %r: %s",
e.urllib2Response.code,
self.authzServiceURI,
requestURI,
traceback.format_exc())
else:
log.error("Error, calling authorisation service %r "
"requesting access to %r: %s",
self.authzServiceURI,
requestURI,
traceback.format_exc())
response = webob.Response()
response.status = httplib.FORBIDDEN
response.body = ('An error occurred retrieving an access '
'decision for %r for user %r' %
(requestURI, remoteUser))
response.content_type = 'text/plain'
return response(environ, start_response)
assertions = samlAuthzResponse.assertions
# Record the result in the user's session to enable later
# interrogation by any result handler Middleware
self.saveResultCtx(query, samlAuthzResponse)
# Set HTTP 403 Forbidden response if any of the decisions returned are
# deny or indeterminate status
failDecisions = (DecisionType.DENY, #@UndefinedVariable
DecisionType.INDETERMINATE) #@UndefinedVariable
# Review decision statement(s) in assertions and enforce the decision
assertion = None
for assertion in assertions:
for authzDecisionStatement in assertion.authzDecisionStatements:
if authzDecisionStatement.decision.value in failDecisions:
response = webob.Response()
if not remoteUser:
# Access failed and the user is not logged in
response.status = httplib.UNAUTHORIZED
else:
# The user is logged in but not authorised
response.status = httplib.FORBIDDEN
response.body = 'Access denied to %r for user %r' % (
requestURI,
remoteUser)
response.content_type = 'text/plain'
log.info(response.body)
return response(environ, start_response)
if assertion is None:
log.error("No assertions set in authorisation decision response "
"from %r", self.authzServiceURI)
response = webob.Response()
response.status = httplib.FORBIDDEN
response.body = ('An error occurred retrieving an access decision '
'for %r for user %r' % (
requestURI,
remoteUser))
response.content_type = 'text/plain'
log.info(response.body)
return response(environ, start_response)
# Cache assertion if flag is set and it's one that's been freshly
# obtained from an authorisation decision query rather than one
# retrieved from the cache
if self.cacheDecisions and noCachedAssertion:
self._cacheAssertions(request.url, [assertion])
# If got through to here then all is well, call next WSGI middleware/app
return self._app(environ, start_response)
| bsd-3-clause | -8,820,283,140,040,014,000 | 41.424581 | 106 | 0.592485 | false |
viranch/exodus | resources/lib/indexers/episodes.py | 1 | 65108 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from resources.lib.modules import trakt
from resources.lib.modules import cleantitle
from resources.lib.modules import cleangenre
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
import os,sys,re,json,zipfile,StringIO,urllib,urllib2,urlparse,datetime
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
control.moderator()
class seasons:
def __init__(self):
self.list = []
self.lang = control.apiLanguage()['tvdb']
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_by_imdb = 'http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s'
self.tvdb_by_query = 'http://thetvdb.com/api/GetSeries.php?seriesname=%s'
self.imdb_by_query = 'http://www.omdbapi.com/?t=%s&y=%s'
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
def get(self, tvshowtitle, year, imdb, tvdb, idx=True):
if control.window.getProperty('PseudoTVRunning') == 'True':
return episodes().get(tvshowtitle, year, imdb, tvdb)
if idx == True:
self.list = cache.get(self.tvdb_list, 24, tvshowtitle, year, imdb, tvdb, self.lang)
self.seasonDirectory(self.list)
return self.list
else:
self.list = self.tvdb_list(tvshowtitle, year, imdb, tvdb, 'en')
return self.list
def tvdb_list(self, tvshowtitle, year, imdb, tvdb, lang, limit=''):
try:
if imdb == '0':
url = self.imdb_by_query % (urllib.quote_plus(tvshowtitle), year)
imdb = client.request(url, timeout='10')
try: imdb = json.loads(imdb)['imdbID']
except: imdb = '0'
if imdb == None or imdb == '' or imdb == 'N/A': imdb = '0'
if tvdb == '0' and not imdb == '0':
url = self.tvdb_by_imdb % imdb
result = client.request(url, timeout='10')
try: tvdb = client.parseDOM(result, 'seriesid')[0]
except: tvdb = '0'
try: name = client.parseDOM(result, 'SeriesName')[0]
except: name = '0'
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(name)
if len(dupe) > 0: tvdb = str(dupe[0])
if tvdb == '': tvdb = '0'
if tvdb == '0':
url = self.tvdb_by_query % (urllib.quote_plus(tvshowtitle))
years = [str(year), str(int(year)+1), str(int(year)-1)]
tvdb = client.request(url, timeout='10')
tvdb = re.sub(r'[^\x00-\x7F]+', '', tvdb)
tvdb = client.replaceHTMLCodes(tvdb)
tvdb = client.parseDOM(tvdb, 'Series')
tvdb = [(x, client.parseDOM(x, 'SeriesName'), client.parseDOM(x, 'FirstAired')) for x in tvdb]
tvdb = [(x, x[1][0], x[2][0]) for x in tvdb if len(x[1]) > 0 and len(x[2]) > 0]
tvdb = [x for x in tvdb if cleantitle.get(tvshowtitle) == cleantitle.get(x[1])]
tvdb = [x[0][0] for x in tvdb if any(y in x[2] for y in years)][0]
tvdb = client.parseDOM(tvdb, 'seriesid')[0]
if tvdb == '': tvdb = '0'
except:
return
try:
if tvdb == '0': return
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
dupe = client.parseDOM(result, 'SeriesName')[0]
dupe = re.compile('[***]Duplicate (\d*)[***]').findall(dupe)
if len(dupe) > 0:
tvdb = str(dupe[0]).encode('utf-8')
url = self.tvdb_info_link % (tvdb, 'en')
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % 'en')
artwork = zip.read('banners.xml')
zip.close()
if not lang == 'en':
url = self.tvdb_info_link % (tvdb, lang)
data = urllib2.urlopen(url, timeout=30).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result2 = zip.read('%s.xml' % lang)
zip.close()
else:
result2 = result
artwork = artwork.split('<Banner>')
artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
artwork = [i for i in artwork if not 'seasonswide' in re.findall('<BannerPath>(.+?)</BannerPath>', i)[0]]
result = result.split('<Episode>')
result2 = result2.split('<Episode>')
item = result[0] ; item2 = result2[0]
episodes = [i for i in result if '<EpisodeNumber>' in i]
episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
locals = [i for i in result2 if '<EpisodeNumber>' in i]
result = '' ; result2 = ''
if limit == '':
episodes = []
elif limit == '-1':
seasons = []
else:
episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
seasons = []
try: poster = client.parseDOM(item, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
try: status = client.parseDOM(item, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
try: studio = client.parseDOM(item, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: cast = client.parseDOM(item, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: label = client.parseDOM(item2, 'SeriesName')[0]
except: label = '0'
label = client.replaceHTMLCodes(label)
label = label.encode('utf-8')
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
except:
pass
for item in seasons:
try:
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
try: thumb = client.parseDOM(thumb[0], 'BannerPath')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if thumb == '0': thumb = poster
self.list.append({'season': season, 'tvshowtitle': tvshowtitle, 'label': label, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
for item in episodes:
try:
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try:
local = client.parseDOM(item, 'id')[0]
local = [x for x in locals if '<id>%s</id>' % str(local) in x][0]
except:
local = item
label = client.parseDOM(local, 'EpisodeName')[0]
if label == '': label = '0'
label = client.replaceHTMLCodes(label)
label = label.encode('utf-8')
try: episodeplot = client.parseDOM(local, 'Overview')[0]
except: episodeplot = ''
if episodeplot == '': episodeplot = '0'
if episodeplot == '0': episodeplot = plot
episodeplot = client.replaceHTMLCodes(episodeplot)
try: episodeplot = episodeplot.encode('utf-8')
except: pass
self.list.append({'title': title, 'label': label, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': episodeplot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
return self.list
def seasonDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
try: indicators = playcount.getSeasonIndicators(items[0]['imdb'])
except: pass
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
labelMenu = control.lang(32055).encode('utf-8')
for i in items:
try:
label = '%s %s' % (labelMenu, i['season'])
systitle = sysname = urllib.quote_plus(i['tvshowtitle'])
imdb, tvdb, year, season = i['imdb'], i['tvdb'], i['year'], i['season']
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'tvshow'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
if not 'duration' in i: meta.update({'duration': '60'})
elif i['duration'] == '0': meta.update({'duration': '60'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
try: meta.update({'tvshowtitle': i['label']})
except: pass
try:
if season in indicators: meta.update({'playcount': 1, 'overlay': 7})
else: meta.update({'playcount': 0, 'overlay': 6})
except:
pass
url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s' % (sysaddon, systitle, year, imdb, tvdb, season)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
cm.append((watchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=7)' % (sysaddon, systitle, imdb, tvdb, season)))
cm.append((unwatchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=6)' % (sysaddon, systitle, imdb, tvdb, season)))
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, sysname, tvdb)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
art = {}
if 'thumb' in i and not i['thumb'] == '0':
art.update({'icon': i['thumb'], 'thumb': i['thumb'], 'poster': i['thumb']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster'], 'poster': i['poster']})
else:
art.update({'icon': addonPoster, 'thumb': addonPoster, 'poster': addonPoster})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'banner': i['fanart']})
else:
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setInfo(type='Video', infoLabels = meta)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
try: control.property(syshandle, 'showplot', items[0]['plot'])
except: pass
control.content(syshandle, 'seasons')
control.directory(syshandle, cacheToDisc=True)
views.setView('seasons', {'skin.estuary': 55, 'skin.confluence': 500})
class episodes:
def __init__(self):
self.list = []
self.trakt_link = 'http://api-v2launch.trakt.tv'
self.tvmaze_link = 'http://api.tvmaze.com'
self.tvdb_key = 'MUQ2MkYyRjkwMDMwQzQ0NA=='
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.today_date = (self.datetime).strftime('%Y-%m-%d')
self.trakt_user = control.setting('trakt.user').strip()
self.lang = control.apiLanguage()['tvdb']
self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key.decode('base64'), '%s', '%s')
self.tvdb_image = 'http://thetvdb.com/banners/'
self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
self.added_link = 'http://api.tvmaze.com/schedule'
self.mycalendar_link = 'http://api-v2launch.trakt.tv/calendars/my/shows/date[29]/60/'
self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/me/history/shows?limit=300'
self.progress_link = 'http://api-v2launch.trakt.tv/users/me/watched/shows'
self.hiddenprogress_link = 'http://api-v2launch.trakt.tv/users/hidden/progress_watched?limit=1000&type=show'
self.calendar_link = 'http://api.tvmaze.com/schedule?date=%s'
self.traktlists_link = 'http://api-v2launch.trakt.tv/users/me/lists'
self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
def get(self, tvshowtitle, year, imdb, tvdb, season=None, episode=None, idx=True):
try:
if idx == True:
if season == None and episode == None:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, '-1')
elif episode == None:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, season)
else:
self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tvdb, self.lang, '-1')
num = [x for x,y in enumerate(self.list) if y['season'] == str(season) and y['episode'] == str(episode)][-1]
self.list = [y for x,y in enumerate(self.list) if x >= num]
self.episodeDirectory(self.list)
return self.list
else:
self.list = seasons().tvdb_list(tvshowtitle, year, imdb, tvdb, 'en', '-1')
return self.list
except:
pass
def calendar(self, url):
try:
try: url = getattr(self, url + '_link')
except: pass
if self.trakt_link in url and url == self.progress_link:
self.blist = cache.get(self.trakt_progress_list, 720, url, self.trakt_user, self.lang)
self.list = []
self.list = cache.get(self.trakt_progress_list, 0, url, self.trakt_user, self.lang)
elif self.trakt_link in url and url == self.mycalendar_link:
self.blist = cache.get(self.trakt_episodes_list, 720, url, self.trakt_user, self.lang)
self.list = []
self.list = cache.get(self.trakt_episodes_list, 0, url, self.trakt_user, self.lang)
elif self.trakt_link in url and '/users/' in url:
self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
self.list = self.list[::-1]
elif self.trakt_link in url:
self.list = cache.get(self.trakt_list, 1, url, self.trakt_user)
elif self.tvmaze_link in url and url == self.added_link:
urls = [i['url'] for i in self.calendars(idx=False)][:5]
self.list = []
for url in urls:
self.list += cache.get(self.tvmaze_list, 720, url, True)
elif self.tvmaze_link in url:
self.list = cache.get(self.tvmaze_list, 1, url, False)
self.episodeDirectory(self.list)
return self.list
except:
pass
def widget(self):
if trakt.getTraktIndicatorsInfo() == True:
setting = control.setting('tv.widget.alt')
else:
setting = control.setting('tv.widget')
if setting == '2':
self.calendar(self.progress_link)
elif setting == '3':
self.calendar(self.mycalendar_link)
else:
self.calendar(self.added_link)
def calendars(self, idx=True):
m = control.lang(32060).encode('utf-8').split('|')
try: months = [(m[0], 'January'), (m[1], 'February'), (m[2], 'March'), (m[3], 'April'), (m[4], 'May'), (m[5], 'June'), (m[6], 'July'), (m[7], 'August'), (m[8], 'September'), (m[9], 'October'), (m[10], 'November'), (m[11], 'December')]
except: months = []
d = control.lang(32061).encode('utf-8').split('|')
try: days = [(d[0], 'Monday'), (d[1], 'Tuesday'), (d[2], 'Wednesday'), (d[3], 'Thursday'), (d[4], 'Friday'), (d[5], 'Saturday'), (d[6], 'Sunday')]
except: days = []
for i in range(0, 30):
try:
name = (self.datetime - datetime.timedelta(days = i))
name = (control.lang(32062) % (name.strftime('%A'), name.strftime('%d %B'))).encode('utf-8')
for m in months: name = name.replace(m[1], m[0])
for d in days: name = name.replace(d[1], d[0])
try: name = name.encode('utf-8')
except: pass
url = self.calendar_link % (self.datetime - datetime.timedelta(days = i)).strftime('%Y-%m-%d')
self.list.append({'name': name, 'url': url, 'image': 'calendar.png', 'action': 'calendar'})
except:
pass
if idx == True: self.addDirectory(self.list)
return self.list
def userlists(self):
try:
userlists = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
activity = trakt.getActivity()
except:
pass
try:
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
except:
pass
try:
self.list = []
if trakt.getTraktCredentialsInfo() == False: raise Exception()
try:
if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link, self.trakt_user): raise Exception()
userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
except:
userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
except:
pass
self.list = userlists
for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'calendar'})
self.addDirectory(self.list, queue=True)
return self.list
def trakt_list(self, url, user):
try:
for i in re.findall('date\[(\d+)\]', url):
url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
q.update({'extended': 'full,images'})
q = (urllib.urlencode(q)).replace('%2C', ',')
u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
result = trakt.getTrakt(u)
itemlist = []
items = json.loads(result)
except:
return
for item in items:
try:
title = item['episode']['title']
if title == None or title == '': raise Exception()
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = item['episode']['season']
season = re.sub('[^0-9]', '', '%01d' % int(season))
if season == '0': raise Exception()
season = season.encode('utf-8')
episode = item['episode']['number']
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
if episode == '0': raise Exception()
episode = episode.encode('utf-8')
tvshowtitle = item['show']['title']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['year']
year = re.sub('[^0-9]', '', str(year))
year = year.encode('utf-8')
imdb = item['show']['ids']['imdb']
if imdb == None or imdb == '': imdb = '0'
else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
tvdb = item['show']['ids']['tvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
premiered = item['episode']['first_aired']
try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
studio = item['show']['network']
if studio == None: studio = '0'
studio = studio.encode('utf-8')
genre = item['show']['genres']
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = str(item['show']['runtime'])
except: duration = '0'
if duration == None: duration = '0'
duration = duration.encode('utf-8')
try: rating = str(item['episode']['rating'])
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = rating.encode('utf-8')
try: votes = str(item['show']['votes'])
except: votes = '0'
try: votes = str(format(int(votes),',d'))
except: pass
if votes == None: votes = '0'
votes = votes.encode('utf-8')
mpaa = item['show']['certification']
if mpaa == None: mpaa = '0'
mpaa = mpaa.encode('utf-8')
plot = item['episode']['overview']
if plot == None or plot == '': plot = item['show']['overview']
if plot == None or plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': '0', 'thumb': '0'})
except:
pass
itemlist = itemlist[::-1]
return itemlist
def trakt_progress_list(self, url, user, lang):
try:
url += '?extended=full'
result = trakt.getTrakt(url)
result = json.loads(result)
items = []
except:
return
for item in result:
try:
num_1 = 0
for i in range(0, len(item['seasons'])): num_1 += len(item['seasons'][i]['episodes'])
num_2 = int(item['show']['aired_episodes'])
if num_1 >= num_2: raise Exception()
season = str(item['seasons'][-1]['number'])
season = season.encode('utf-8')
episode = str(item['seasons'][-1]['episodes'][-1]['number'])
episode = episode.encode('utf-8')
tvshowtitle = item['show']['title']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['year']
year = re.sub('[^0-9]', '', str(year))
if int(year) > int(self.datetime.strftime('%Y')): raise Exception()
imdb = item['show']['ids']['imdb']
if imdb == None or imdb == '': imdb = '0'
imdb = imdb.encode('utf-8')
tvdb = item['show']['ids']['tvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
items.append({'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year, 'snum': season, 'enum': episode})
except:
pass
try:
result = trakt.getTrakt(self.hiddenprogress_link)
result = json.loads(result)
result = [str(i['show']['ids']['tvdb']) for i in result]
items = [i for i in items if not i['tvdb'] in result]
except:
pass
def items_list(i):
try:
item = [x for x in self.blist if x['tvdb'] == i['tvdb'] and x['snum'] == i['snum'] and x['enum'] == i['enum']][0]
item['action'] = 'episodes'
self.list.append(item)
return
except:
pass
try:
url = self.tvdb_info_link % (i['tvdb'], lang)
data = urllib2.urlopen(url, timeout=10).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % lang)
artwork = zip.read('banners.xml')
zip.close()
result = result.split('<Episode>')
item = [x for x in result if '<EpisodeNumber>' in x]
item2 = result[0]
num = [x for x,y in enumerate(item) if re.compile('<SeasonNumber>(.+?)</SeasonNumber>').findall(y)[0] == str(i['snum']) and re.compile('<EpisodeNumber>(.+?)</EpisodeNumber>').findall(y)[0] == str(i['enum'])][-1]
item = [y for x,y in enumerate(item) if x > num][0]
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
try: status = client.parseDOM(item2, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
if status == 'Ended': pass
elif premiered == '0': raise Exception()
elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
tvshowtitle = i['tvshowtitle']
imdb, tvdb = i['imdb'], i['tvdb']
year = i['year']
try: year = year.encode('utf-8')
except: pass
try: poster = client.parseDOM(item2, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item2, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item2, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: studio = client.parseDOM(item2, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item2, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item2, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item2, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try: cast = client.parseDOM(item2, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: plot = client.parseDOM(item, 'Overview')[0]
except: plot = ''
if plot == '':
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb, 'snum': i['snum'], 'enum': i['enum'], 'action': 'episodes'})
except:
pass
items = items[:100]
threads = []
for i in items: threads.append(workers.Thread(items_list, i))
[i.start() for i in threads]
[i.join() for i in threads]
try: self.list = sorted(self.list, key=lambda k: k['premiered'], reverse=True)
except: pass
return self.list
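    # Resolves a Trakt episode listing into full metadata entries, reusing
    # cached entries from self.blist where possible and otherwise fetching
    # episode details and artwork from TheTVDB zip bundles.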
def trakt_episodes_list(self, url, user, lang):
items = self.trakt_list(url, user)
def items_list(i):
try:
item = [x for x in self.blist if x['tvdb'] == i['tvdb'] and x['season'] == i['season'] and x['episode'] == i['episode']][0]
if item['poster'] == '0': raise Exception()
self.list.append(item)
return
except:
pass
try:
url = self.tvdb_info_link % (i['tvdb'], lang)
data = urllib2.urlopen(url, timeout=10).read()
zip = zipfile.ZipFile(StringIO.StringIO(data))
result = zip.read('%s.xml' % lang)
artwork = zip.read('banners.xml')
zip.close()
result = result.split('<Episode>')
item = [(re.findall('<SeasonNumber>%01d</SeasonNumber>' % int(i['season']), x), re.findall('<EpisodeNumber>%01d</EpisodeNumber>' % int(i['episode']), x), x) for x in result]
item = [x[2] for x in item if len(x[0]) > 0 and len(x[1]) > 0][0]
item2 = result[0]
premiered = client.parseDOM(item, 'FirstAired')[0]
if premiered == '' or '-00' in premiered: premiered = '0'
premiered = client.replaceHTMLCodes(premiered)
premiered = premiered.encode('utf-8')
try: status = client.parseDOM(item2, 'Status')[0]
except: status = ''
if status == '': status = 'Ended'
status = client.replaceHTMLCodes(status)
status = status.encode('utf-8')
title = client.parseDOM(item, 'EpisodeName')[0]
if title == '': title = '0'
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = client.parseDOM(item, 'SeasonNumber')[0]
season = '%01d' % int(season)
season = season.encode('utf-8')
episode = client.parseDOM(item, 'EpisodeNumber')[0]
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
episode = episode.encode('utf-8')
tvshowtitle = i['tvshowtitle']
imdb, tvdb = i['imdb'], i['tvdb']
year = i['year']
try: year = year.encode('utf-8')
except: pass
try: poster = client.parseDOM(item2, 'poster')[0]
except: poster = ''
if not poster == '': poster = self.tvdb_image + poster
else: poster = '0'
poster = client.replaceHTMLCodes(poster)
poster = poster.encode('utf-8')
try: banner = client.parseDOM(item2, 'banner')[0]
except: banner = ''
if not banner == '': banner = self.tvdb_image + banner
else: banner = '0'
banner = client.replaceHTMLCodes(banner)
banner = banner.encode('utf-8')
try: fanart = client.parseDOM(item2, 'fanart')[0]
except: fanart = ''
if not fanart == '': fanart = self.tvdb_image + fanart
else: fanart = '0'
fanart = client.replaceHTMLCodes(fanart)
fanart = fanart.encode('utf-8')
try: thumb = client.parseDOM(item, 'filename')[0]
except: thumb = ''
if not thumb == '': thumb = self.tvdb_image + thumb
else: thumb = '0'
thumb = client.replaceHTMLCodes(thumb)
thumb = thumb.encode('utf-8')
if not poster == '0': pass
elif not fanart == '0': poster = fanart
elif not banner == '0': poster = banner
if not banner == '0': pass
elif not fanart == '0': banner = fanart
elif not poster == '0': banner = poster
if not thumb == '0': pass
elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
elif not poster == '0': thumb = poster
try: studio = client.parseDOM(item2, 'Network')[0]
except: studio = ''
if studio == '': studio = '0'
studio = client.replaceHTMLCodes(studio)
studio = studio.encode('utf-8')
try: genre = client.parseDOM(item2, 'Genre')[0]
except: genre = ''
genre = [x for x in genre.split('|') if not x == '']
genre = ' / '.join(genre)
if genre == '': genre = '0'
genre = client.replaceHTMLCodes(genre)
genre = genre.encode('utf-8')
try: duration = client.parseDOM(item2, 'Runtime')[0]
except: duration = ''
if duration == '': duration = '0'
duration = client.replaceHTMLCodes(duration)
duration = duration.encode('utf-8')
try: rating = client.parseDOM(item, 'Rating')[0]
except: rating = ''
if rating == '': rating = '0'
rating = client.replaceHTMLCodes(rating)
rating = rating.encode('utf-8')
try: votes = client.parseDOM(item2, 'RatingCount')[0]
except: votes = '0'
if votes == '': votes = '0'
votes = client.replaceHTMLCodes(votes)
votes = votes.encode('utf-8')
try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
except: mpaa = ''
if mpaa == '': mpaa = '0'
mpaa = client.replaceHTMLCodes(mpaa)
mpaa = mpaa.encode('utf-8')
try: director = client.parseDOM(item, 'Director')[0]
except: director = ''
director = [x for x in director.split('|') if not x == '']
director = ' / '.join(director)
if director == '': director = '0'
director = client.replaceHTMLCodes(director)
director = director.encode('utf-8')
try: writer = client.parseDOM(item, 'Writer')[0]
except: writer = ''
writer = [x for x in writer.split('|') if not x == '']
writer = ' / '.join(writer)
if writer == '': writer = '0'
writer = client.replaceHTMLCodes(writer)
writer = writer.encode('utf-8')
try: cast = client.parseDOM(item2, 'Actors')[0]
except: cast = ''
cast = [x for x in cast.split('|') if not x == '']
try: cast = [(x.encode('utf-8'), '') for x in cast]
except: cast = []
try: plot = client.parseDOM(item, 'Overview')[0]
except: plot = ''
if plot == '':
try: plot = client.parseDOM(item2, 'Overview')[0]
except: plot = ''
if plot == '': plot = '0'
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
except:
pass
items = items[:100]
threads = []
for i in items: threads.append(workers.Thread(items_list, i))
[i.start() for i in threads]
[i.join() for i in threads]
return self.list
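    # Returns the user's Trakt lists as (name, plugin URL) entries, sorted
    # by name while ignoring leading articles.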
def trakt_user_list(self, url, user):
try:
result = trakt.getTrakt(url)
items = json.loads(result)
except:
            return
for item in items:
try:
try: name = item['list']['name']
except: name = item['name']
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
try: url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
except: url = ('me', item['ids']['slug'])
url = self.traktlist_link % url
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
return self.list
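    # Builds an episode list from a TVMaze schedule feed, keeping only
    # English-language shows (and only scripted ones when limit is True).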
def tvmaze_list(self, url, limit):
try:
result = client.request(url)
itemlist = []
items = json.loads(result)
except:
return
for item in items:
try:
if not 'english' in item['show']['language'].lower(): raise Exception()
if limit == True and not 'scripted' in item['show']['type'].lower(): raise Exception()
title = item['name']
if title == None or title == '': raise Exception()
title = client.replaceHTMLCodes(title)
title = title.encode('utf-8')
season = item['season']
season = re.sub('[^0-9]', '', '%01d' % int(season))
if season == '0': raise Exception()
season = season.encode('utf-8')
episode = item['number']
episode = re.sub('[^0-9]', '', '%01d' % int(episode))
if episode == '0': raise Exception()
episode = episode.encode('utf-8')
tvshowtitle = item['show']['name']
if tvshowtitle == None or tvshowtitle == '': raise Exception()
tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
tvshowtitle = tvshowtitle.encode('utf-8')
year = item['show']['premiered']
year = re.findall('(\d{4})', year)[0]
year = year.encode('utf-8')
imdb = item['show']['externals']['imdb']
if imdb == None or imdb == '': imdb = '0'
else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
imdb = imdb.encode('utf-8')
tvdb = item['show']['externals']['thetvdb']
if tvdb == None or tvdb == '': raise Exception()
tvdb = re.sub('[^0-9]', '', str(tvdb))
tvdb = tvdb.encode('utf-8')
poster = '0'
try: poster = item['show']['image']['original']
except: poster = '0'
if poster == None or poster == '': poster = '0'
poster = poster.encode('utf-8')
try: thumb1 = item['show']['image']['original']
except: thumb1 = '0'
try: thumb2 = item['image']['original']
except: thumb2 = '0'
if thumb2 == None or thumb2 == '0': thumb = thumb1
else: thumb = thumb2
if thumb == None or thumb == '': thumb = '0'
thumb = thumb.encode('utf-8')
premiered = item['airdate']
try: premiered = re.findall('(\d{4}-\d{2}-\d{2})', premiered)[0]
except: premiered = '0'
premiered = premiered.encode('utf-8')
try: studio = item['show']['network']['name']
except: studio = '0'
if studio == None: studio = '0'
studio = studio.encode('utf-8')
try: genre = item['show']['genres']
except: genre = '0'
genre = [i.title() for i in genre]
if genre == []: genre = '0'
genre = ' / '.join(genre)
genre = genre.encode('utf-8')
try: duration = item['show']['runtime']
except: duration = '0'
if duration == None: duration = '0'
duration = str(duration)
duration = duration.encode('utf-8')
try: rating = item['show']['rating']['average']
except: rating = '0'
if rating == None or rating == '0.0': rating = '0'
rating = str(rating)
rating = rating.encode('utf-8')
try: plot = item['show']['summary']
except: plot = '0'
if plot == None: plot = '0'
plot = re.sub('<.+?>|</.+?>|\n', '', plot)
plot = client.replaceHTMLCodes(plot)
plot = plot.encode('utf-8')
itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'plot': plot, 'imdb': imdb, 'tvdb': tvdb, 'poster': poster, 'thumb': thumb})
except:
pass
itemlist = itemlist[::-1]
return itemlist
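    # Renders episode items into the Kodi directory: builds labels, artwork,
    # context menus and playback/browse URLs for each entry.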
def episodeDirectory(self, items):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
traktCredentials = trakt.getTraktCredentialsInfo()
try: isOld = False ; control.item().getArt('type')
except: isOld = True
isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'
indicators = playcount.getTVShowIndicators(refresh=True)
try: multi = [i['tvshowtitle'] for i in items]
except: multi = []
multi = len([x for y,x in enumerate(multi) if x not in multi[:y]])
multi = True if multi > 1 else False
try: sysaction = items[0]['action']
except: sysaction = ''
isFolder = False if not sysaction == 'episodes' else True
playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')
watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
queueMenu = control.lang(32065).encode('utf-8')
traktManagerMenu = control.lang(32070).encode('utf-8')
tvshowBrowserMenu = control.lang(32071).encode('utf-8')
for i in items:
try:
if not 'label' in i: i['label'] = i['title']
if i['label'] == '0':
label = '%sx%02d . %s %s' % (i['season'], int(i['episode']), 'Episode', i['episode'])
else:
label = '%sx%02d . %s' % (i['season'], int(i['episode']), i['label'])
if multi == True:
label = '%s - %s' % (i['tvshowtitle'], label)
imdb, tvdb, year, season, episode = i['imdb'], i['tvdb'], i['year'], i['season'], i['episode']
systitle = urllib.quote_plus(i['title'])
systvshowtitle = urllib.quote_plus(i['tvshowtitle'])
syspremiered = urllib.quote_plus(i['premiered'])
meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
meta.update({'mediatype': 'episode'})
meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, systvshowtitle)})
if not 'duration' in i: meta.update({'duration': '60'})
elif i['duration'] == '0': meta.update({'duration': '60'})
try: meta.update({'duration': str(int(meta['duration']) * 60)})
except: pass
try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
except: pass
try: meta.update({'year': re.findall('(\d{4})', i['premiered'])[0]})
except: pass
try: meta.update({'title': i['label']})
except: pass
sysmeta = urllib.quote_plus(json.dumps(meta))
url = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&premiered=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, tvdb, season, episode, systvshowtitle, syspremiered, sysmeta, self.systime)
sysurl = urllib.quote_plus(url)
path = '%s?action=play&title=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s&tvshowtitle=%s&premiered=%s' % (sysaddon, systitle, year, imdb, tvdb, season, episode, systvshowtitle, syspremiered)
if isFolder == True:
url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s&season=%s&episode=%s' % (sysaddon, systvshowtitle, year, imdb, tvdb, season, episode)
cm = []
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
if multi == True:
cm.append((tvshowBrowserMenu, 'Container.Update(%s?action=seasons&tvshowtitle=%s&year=%s&imdb=%s&tvdb=%s,return)' % (sysaddon, systvshowtitle, year, imdb, tvdb)))
try:
overlay = int(playcount.getEpisodeOverlay(indicators, imdb, tvdb, season, episode))
if overlay == 7:
cm.append((unwatchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=6)' % (sysaddon, imdb, tvdb, season, episode)))
meta.update({'playcount': 1, 'overlay': 7})
else:
cm.append((watchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=7)' % (sysaddon, imdb, tvdb, season, episode)))
meta.update({'playcount': 0, 'overlay': 6})
except:
pass
if traktCredentials == True:
cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, systvshowtitle, tvdb)))
if isFolder == False:
cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
if isOld == True:
cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))
item = control.item(label=label)
art = {}
if 'poster' in i and not i['poster'] == '0':
art.update({'poster': i['poster'], 'tvshow.poster': i['poster'], 'season.poster': i['poster']})
else:
art.update({'poster': addonPoster})
if 'thumb' in i and not i['thumb'] == '0':
art.update({'icon': i['thumb'], 'thumb': i['thumb']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'icon': i['fanart'], 'thumb': i['fanart']})
elif 'poster' in i and not i['poster'] == '0':
art.update({'icon': i['poster'], 'thumb': i['poster']})
else:
art.update({'icon': addonFanart, 'thumb': addonFanart})
if 'banner' in i and not i['banner'] == '0':
art.update({'banner': i['banner']})
elif 'fanart' in i and not i['fanart'] == '0':
art.update({'banner': i['fanart']})
else:
art.update({'banner': addonBanner})
if settingFanart == 'true' and 'fanart' in i and not i['fanart'] == '0':
item.setProperty('Fanart_Image', i['fanart'])
elif not addonFanart == None:
item.setProperty('Fanart_Image', addonFanart)
item.setArt(art)
item.addContextMenuItems(cm)
item.setProperty('IsPlayable', isPlayable)
item.setInfo(type='Video', infoLabels = meta)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
except:
pass
control.content(syshandle, 'episodes')
control.directory(syshandle, cacheToDisc=True)
views.setView('episodes', {'skin.estuary': 55, 'skin.confluence': 504})
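    # Renders simple navigation folders (name, icon and action URL) into the
    # Kodi directory listing.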
def addDirectory(self, items, queue=False):
if items == None or len(items) == 0: control.idle() ; sys.exit()
sysaddon = sys.argv[0]
syshandle = int(sys.argv[1])
addonFanart, addonThumb, artPath = control.addonFanart(), control.addonThumb(), control.artPath()
queueMenu = control.lang(32065).encode('utf-8')
for i in items:
try:
name = i['name']
if i['image'].startswith('http'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
if queue == True:
cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
item = control.item(label=name)
item.setArt({'icon': thumb, 'thumb': thumb})
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
item.addContextMenuItems(cm)
control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
except:
pass
control.content(syshandle, 'addons')
control.directory(syshandle, cacheToDisc=True)
| gpl-3.0 | -4,454,347,603,305,280,000 | 41.526453 | 502 | 0.504408 | false |
dakrauth/strutil | setup.py | 1 | 1169 | #!/usr/bin/env python
import os, sys
from setuptools import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit(0)
with open('README.rst', 'r') as f:
long_description = f.read()
# Dynamically calculate the version based on strutil.__version__.
version = __import__('strutil').__version__
setup(
name='strutil',
url='https://github.com/dakrauth/strutil',
author='David A Krauth',
author_email='[email protected]',
description='Simple tools for downloading, cleaning, extracting and parsing content',
version=version,
long_description=long_description,
platforms=['any'],
license='MIT License',
py_modules=['strutil'],
classifiers=(
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing'
),
)
| mit | -4,161,712,837,771,272,700 | 30.594595 | 89 | 0.63302 | false |
maas/maas | src/maasserver/rpc/tests/test_leases.py | 1 | 20464 | # Copyright 2015-2016 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `rpc.leases`."""
from datetime import datetime
import random
import time
from django.utils import timezone
from netaddr import IPAddress
from testtools.matchers import Contains, Equals, MatchesStructure, Not
from maasserver.enum import INTERFACE_TYPE, IPADDRESS_FAMILY, IPADDRESS_TYPE
from maasserver.models import DNSResource
from maasserver.models.interface import UnknownInterface
from maasserver.models.staticipaddress import StaticIPAddress
from maasserver.rpc.leases import LeaseUpdateError, update_lease
from maasserver.testing.factory import factory
from maasserver.testing.testcase import MAASServerTestCase
from maasserver.utils.orm import get_one, reload_object
class TestUpdateLease(MAASServerTestCase):
def make_kwargs(
self,
action=None,
mac=None,
ip=None,
timestamp=None,
lease_time=None,
hostname=None,
subnet=None,
):
if action is None:
action = random.choice(["commit", "expiry", "release"])
if mac is None:
mac = factory.make_mac_address()
if ip is None:
if subnet is not None:
ip = factory.pick_ip_in_network(subnet.get_ipnetwork())
else:
ip = factory.make_ip_address()
if timestamp is None:
timestamp = int(time.time())
if action == "commit":
if lease_time is None:
lease_time = random.randint(30, 1000)
if hostname is None:
hostname = factory.make_name("host")
ip_family = "ipv4"
if IPAddress(ip).version == IPADDRESS_FAMILY.IPv6:
ip_family = "ipv6"
return {
"action": action,
"mac": mac,
"ip": ip,
"ip_family": ip_family,
"timestamp": timestamp,
"lease_time": lease_time,
"hostname": hostname,
}
def make_managed_subnet(self):
return factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
def test_raises_LeaseUpdateError_for_unknown_action(self):
action = factory.make_name("action")
kwargs = self.make_kwargs(action=action)
error = self.assertRaises(LeaseUpdateError, update_lease, **kwargs)
self.assertEqual("Unknown lease action: %s" % action, str(error))
def test_raises_LeaseUpdateError_for_no_subnet(self):
kwargs = self.make_kwargs()
error = self.assertRaises(LeaseUpdateError, update_lease, **kwargs)
self.assertEqual("No subnet exists for: %s" % kwargs["ip"], str(error))
def test_raises_LeaseUpdateError_for_ipv4_mismatch(self):
ipv6_network = factory.make_ipv6_network()
subnet = factory.make_Subnet(cidr=str(ipv6_network.cidr))
kwargs = self.make_kwargs(subnet=subnet)
kwargs["ip_family"] = "ipv4"
error = self.assertRaises(LeaseUpdateError, update_lease, **kwargs)
self.assertEqual(
"Family for the subnet does not match. Expected: ipv4", str(error)
)
def test_raises_LeaseUpdateError_for_ipv6_mismatch(self):
ipv4_network = factory.make_ipv4_network()
subnet = factory.make_Subnet(cidr=str(ipv4_network.cidr))
kwargs = self.make_kwargs(subnet=subnet)
kwargs["ip_family"] = "ipv6"
error = self.assertRaises(LeaseUpdateError, update_lease, **kwargs)
self.assertEqual(
"Family for the subnet does not match. Expected: ipv6", str(error)
)
def test_does_nothing_if_expiry_for_unknown_mac(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(action="expiry", ip=ip)
update_lease(**kwargs)
self.assertIsNone(
StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=ip
).first()
)
def test_does_nothing_if_release_for_unknown_mac(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(action="release", ip=ip)
update_lease(**kwargs)
self.assertIsNone(
StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=ip
).first()
)
def test_creates_lease_for_unknown_interface(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(action="commit", ip=ip)
update_lease(**kwargs)
unknown_interface = UnknownInterface.objects.filter(
mac_address=kwargs["mac"]
).first()
self.assertIsNotNone(unknown_interface)
self.assertEqual(subnet.vlan, unknown_interface.vlan)
sip = unknown_interface.ip_addresses.first()
self.assertIsNotNone(sip)
self.assertThat(
sip,
MatchesStructure.byEquality(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip=ip,
subnet=subnet,
lease_time=kwargs["lease_time"],
created=datetime.fromtimestamp(kwargs["timestamp"]),
updated=datetime.fromtimestamp(kwargs["timestamp"]),
),
)
def test_create_ignores_none_hostname(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
hostname = "(none)"
kwargs = self.make_kwargs(action="commit", ip=ip, hostname=hostname)
update_lease(**kwargs)
unknown_interface = UnknownInterface.objects.filter(
mac_address=kwargs["mac"]
).first()
self.assertIsNotNone(unknown_interface)
self.assertEqual(subnet.vlan, unknown_interface.vlan)
sip = unknown_interface.ip_addresses.first()
self.assertIsNotNone(sip)
self.assertThat(
sip,
MatchesStructure.byEquality(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip=ip,
subnet=subnet,
lease_time=kwargs["lease_time"],
created=datetime.fromtimestamp(kwargs["timestamp"]),
updated=datetime.fromtimestamp(kwargs["timestamp"]),
),
)
        # No DNS record should have been created.
self.assertThat(DNSResource.objects.count(), Equals(0))
def test_creates_dns_record_for_hostname(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
hostname = factory.make_name().lower()
kwargs = self.make_kwargs(action="commit", ip=ip, hostname=hostname)
update_lease(**kwargs)
unknown_interface = UnknownInterface.objects.filter(
mac_address=kwargs["mac"]
).first()
self.assertIsNotNone(unknown_interface)
self.assertEqual(subnet.vlan, unknown_interface.vlan)
sip = unknown_interface.ip_addresses.first()
self.assertIsNotNone(sip)
dnsrr = get_one(DNSResource.objects.filter(name=hostname))
self.assertThat(sip.dnsresource_set.all(), Contains(dnsrr))
    def test_multiple_calls_reuse_existing_staticipaddress_records(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
hostname = factory.make_name().lower()
kwargs = self.make_kwargs(action="commit", ip=ip, hostname=hostname)
update_lease(**kwargs)
sip1 = StaticIPAddress.objects.get(ip=ip)
update_lease(**kwargs)
sip2 = StaticIPAddress.objects.get(ip=ip)
self.assertThat(sip1.id, Equals(sip2.id))
def test_skips_dns_record_for_hostname_from_existing_node(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
hostname = factory.make_name().lower()
factory.make_Node(hostname=hostname)
kwargs = self.make_kwargs(action="commit", ip=ip, hostname=hostname)
update_lease(**kwargs)
unknown_interface = UnknownInterface.objects.filter(
mac_address=kwargs["mac"]
).first()
self.assertIsNotNone(unknown_interface)
self.assertEqual(subnet.vlan, unknown_interface.vlan)
sip = unknown_interface.ip_addresses.first()
self.assertIsNotNone(sip)
self.assertThat(sip.dnsresource_set.all(), Not(Contains(sip)))
def test_skips_dns_record_for_coerced_hostname_from_existing_node(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
hostname = "gaming device"
factory.make_Node(hostname="gaming-device")
kwargs = self.make_kwargs(action="commit", ip=ip, hostname=hostname)
update_lease(**kwargs)
unknown_interface = UnknownInterface.objects.filter(
mac_address=kwargs["mac"]
).first()
self.assertIsNotNone(unknown_interface)
self.assertEqual(subnet.vlan, unknown_interface.vlan)
sip = unknown_interface.ip_addresses.first()
self.assertIsNotNone(sip)
self.assertThat(sip.dnsresource_set.all(), Not(Contains(sip)))
def test_creates_lease_for_physical_interface(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface = node.get_boot_interface()
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(
action="commit", mac=boot_interface.mac_address, ip=ip
)
update_lease(**kwargs)
sip = StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=ip
).first()
self.assertThat(
sip,
MatchesStructure.byEquality(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip=ip,
subnet=subnet,
lease_time=kwargs["lease_time"],
created=datetime.fromtimestamp(kwargs["timestamp"]),
updated=datetime.fromtimestamp(kwargs["timestamp"]),
),
)
self.assertItemsEqual(
[boot_interface.id], sip.interface_set.values_list("id", flat=True)
)
self.assertEqual(
1,
StaticIPAddress.objects.filter_by_ip_family(
subnet.get_ipnetwork().version
)
.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED, interface=boot_interface
)
.count(),
"Interface should only have one DISCOVERED IP address.",
)
def test_creates_lease_for_physical_interface_keeps_other_ip_family(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface = node.get_boot_interface()
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(
action="commit", mac=boot_interface.mac_address, ip=ip
)
        # Make a DISCOVERED IP in the other address family to make sure it is
# not removed.
network = subnet.get_ipnetwork()
if network.version == IPADDRESS_FAMILY.IPv4:
other_network = factory.make_ipv6_network()
else:
other_network = factory.make_ipv4_network()
other_subnet = factory.make_Subnet(cidr=str(other_network.cidr))
other_ip = factory.make_StaticIPAddress(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip="",
subnet=other_subnet,
interface=boot_interface,
)
update_lease(**kwargs)
self.assertIsNotNone(
reload_object(other_ip),
"DISCOVERED IP address from the other address family should not "
"have been deleted.",
)
def test_creates_lease_for_bond_interface(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface = node.get_boot_interface()
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
bond_interface = factory.make_Interface(
INTERFACE_TYPE.BOND,
mac_address=boot_interface.mac_address,
parents=[boot_interface],
)
kwargs = self.make_kwargs(
action="commit", mac=bond_interface.mac_address, ip=ip
)
update_lease(**kwargs)
sip = StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=ip
).first()
self.assertThat(
sip,
MatchesStructure.byEquality(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip=ip,
subnet=subnet,
lease_time=kwargs["lease_time"],
created=datetime.fromtimestamp(kwargs["timestamp"]),
updated=datetime.fromtimestamp(kwargs["timestamp"]),
),
)
self.assertItemsEqual(
[boot_interface.id, bond_interface.id],
sip.interface_set.values_list("id", flat=True),
)
def test_release_removes_lease_keeps_discovered_subnet(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface = node.get_boot_interface()
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(
action="release", mac=boot_interface.mac_address, ip=ip
)
update_lease(**kwargs)
sip = StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip=None,
subnet=subnet,
interface=boot_interface,
).first()
self.assertIsNotNone(
sip,
"DISCOVERED IP address shold have been created without an "
"IP address.",
)
self.assertItemsEqual(
[boot_interface.id], sip.interface_set.values_list("id", flat=True)
)
def test_expiry_removes_lease_keeps_discovered_subnet(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface = node.get_boot_interface()
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(
action="expiry", mac=boot_interface.mac_address, ip=ip
)
update_lease(**kwargs)
sip = StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED,
ip=None,
subnet=subnet,
interface=boot_interface,
).first()
self.assertIsNotNone(
sip,
"DISCOVERED IP address shold have been created without an "
"IP address.",
)
self.assertItemsEqual(
[boot_interface.id], sip.interface_set.values_list("id", flat=True)
)
def test_expiry_does_not_keep_adding_null_ip_records_repeated_calls(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
# Create a bunch of null IPs to show the effects of bug 1817056.
null_ips = [
StaticIPAddress(
created=timezone.now(),
updated=timezone.now(),
ip=None,
alloc_type=IPADDRESS_TYPE.DISCOVERED,
subnet=subnet,
)
for _ in range(10)
]
StaticIPAddress.objects.bulk_create(null_ips)
node = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface = node.get_boot_interface()
boot_interface.ip_addresses.add(*null_ips)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs = self.make_kwargs(
action="expiry", mac=boot_interface.mac_address, ip=ip
)
null_ip_query = StaticIPAddress.objects.filter(
alloc_type=IPADDRESS_TYPE.DISCOVERED, ip=None, subnet=subnet
)
update_lease(**kwargs)
# XXX: We shouldn't need to record the previous count and
# instead expect the count to be 1. This will be addressed
# in bug 1817305.
previous_null_ip_count = null_ip_query.count()
previous_interface_ip_count = boot_interface.ip_addresses.count()
update_lease(**kwargs)
self.assertEqual(previous_null_ip_count, null_ip_query.count())
self.assertEqual(
previous_interface_ip_count, boot_interface.ip_addresses.count()
)
def test_expiry_does_not_keep_adding_null_ip_records_other_interface(self):
subnet = factory.make_ipv4_Subnet_with_IPRanges(
with_static_range=False, dhcp_on=True
)
node1 = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface1 = node1.get_boot_interface()
node2 = factory.make_Node_with_Interface_on_Subnet(subnet=subnet)
boot_interface2 = node2.get_boot_interface()
# We now have two nodes, both having null IP records linking
# them to the same subnet.
self.assertIsNone(boot_interface1.ip_addresses.first().ip)
self.assertIsNone(boot_interface2.ip_addresses.first().ip)
dynamic_range = subnet.get_dynamic_ranges()[0]
ip = factory.pick_ip_in_IPRange(dynamic_range)
kwargs1 = self.make_kwargs(
action="expiry", mac=boot_interface1.mac_address, ip=ip
)
kwargs2 = self.make_kwargs(
action="expiry", mac=boot_interface2.mac_address, ip=ip
)
self.assertEqual(1, boot_interface1.ip_addresses.count())
self.assertEqual(1, boot_interface2.ip_addresses.count())
# When expiring the leases for the two nodes, they keep the
# existing links they have.
previous_ip_id1 = boot_interface1.ip_addresses.first().id
previous_ip_id2 = boot_interface2.ip_addresses.first().id
update_lease(**kwargs1)
update_lease(**kwargs2)
[ip_address1] = boot_interface1.ip_addresses.all()
self.assertEqual(previous_ip_id1, ip_address1.id)
self.assertEqual(1, ip_address1.interface_set.count())
[ip_address2] = boot_interface2.ip_addresses.all()
self.assertEqual(previous_ip_id2, ip_address2.id)
self.assertEqual(1, ip_address2.interface_set.count())
self.assertEqual(1, boot_interface1.ip_addresses.count())
self.assertEqual(1, boot_interface2.ip_addresses.count())
| agpl-3.0 | 3,252,529,167,948,657,700 | 39.362919 | 79 | 0.613614 | false |
torkelsson/meta-package-manager | setup.py | 1 | 6388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2018 Kevin Deldycke <[email protected]>
# and contributors.
# All Rights Reserved.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals
)
import io
import re
from os import path
from setuptools import find_packages, setup
MODULE_NAME = 'meta_package_manager'
PACKAGE_NAME = MODULE_NAME.replace('_', '-')
DEPENDENCIES = [
'boltons >= 17.0.0',
'click >= 5.0',
'click_log >= 0.2.0',
'cli-helpers',
'packaging',
'simplejson',
# shutil.which() is only available starting with Python 3.3. Use backports
# for Python 2. See marker usage at: https://setuptools.readthedocs.io/en
# /latest/setuptools.html#declaring-platform-specific-dependencies
'backports.shutil_which >= 3.5.1 ; python_version < "3.3"',
]
EXTRA_DEPENDENCIES = {
# Extra dependencies are made available through the
# `$ pip install .[keyword]` command.
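    # For example, ``pip install ".[tests]"`` (quoting may vary by shell) would
    # additionally pull in the test-only tools listed under the 'tests' key below.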
'docs': [
'sphinx >= 1.4',
'sphinx_rtd_theme'],
'tests': [
'pycodestyle >= 2.1.0',
'pylint',
'pytest',
# More pytest plugins at: http://plugincompat.herokuapp.com .
'pytest-cov'],
'develop': [
'bumpversion',
'isort',
'readme_renderer >= 17.0',
'setuptools >= 24.2.1',
'wheel'],
}
def read_file(*relative_path_elements):
""" Return content of a file relative to this ``setup.py``. """
file_path = path.join(path.dirname(__file__), *relative_path_elements)
return io.open(file_path, encoding='utf8').read().strip()
# Cache fetched version.
_version = None # noqa
def version():
""" Extracts version from the ``__init__.py`` file at the module's root.
Inspired by: https://packaging.python.org/single_source_version/
"""
global _version
if _version:
return _version
init_file = read_file(MODULE_NAME, '__init__.py')
matches = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', init_file, re.M)
if not matches:
raise RuntimeError("Unable to find version string in __init__.py .")
_version = matches.group(1) # noqa
return _version
def latest_changes():
""" Extract part of changelog pertaining to version. """
lines = []
for line in read_file('CHANGES.rst').splitlines():
if line.startswith('-------'):
if len(lines) > 1:
lines = lines[:-1]
break
if lines:
lines.append(line)
elif line.startswith("`{} (".format(version())):
lines.append(line)
if not lines:
raise RuntimeError(
"Unable to find changelog for the {} release.".format(version()))
# Renormalize and clean lines.
return '\n'.join(lines).strip().split('\n')
def long_description():
""" Collates project README and latest changes. """
changes = latest_changes()
changes[0] = "`Changes for v{}".format(changes[0][1:])
changes[1] = '-' * len(changes[0])
return "\n\n\n".join([
read_file('README.rst'),
'\n'.join(changes),
"`Full changelog <https://{}.readthedocs.io/en/develop/changelog.html"
"#changelog>`_.".format(PACKAGE_NAME),
])
setup(
name=PACKAGE_NAME,
version=version(),
description="CLI providing unifying interface to all package managers.",
long_description=long_description(),
keywords=[
'CLI', 'package', 'pip', 'apm', 'npm', 'homebrew', 'brew', 'cask',
'osx', 'macos', 'node', 'atom', 'ruby', 'gem', 'appstore', 'mas',
'bitbar', 'plugin'],
author='Kevin Deldycke',
author_email='[email protected]',
url='https://github.com/kdeldycke/meta-package-manager',
license='GPLv2+',
packages=find_packages(),
# https://www.python.org/dev/peps/pep-0345/#version-specifiers
python_requires='>= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.*',
install_requires=DEPENDENCIES,
tests_require=DEPENDENCIES + EXTRA_DEPENDENCIES['tests'],
extras_require=EXTRA_DEPENDENCIES,
dependency_links=[],
test_suite='{}.tests'.format(MODULE_NAME),
classifiers=[
# See: https://pypi.python.org/pypi?:action=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Plugins',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: '
'GNU General Public License v2 or later (GPLv2+)',
'Operating System :: MacOS :: MacOS X',
# List of python versions and their support status:
# https://en.wikipedia.org/wiki/CPython#Version_history
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving :: Packaging',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Software Distribution',
'Topic :: Utilities',
],
entry_points={
'console_scripts': [
'mpm={}.cli:cli'.format(MODULE_NAME),
],
}
)
| gpl-2.0 | -2,708,297,968,679,874,000 | 32.445026 | 78 | 0.607702 | false |
gabstopper/smc-python | smc/elements/profiles.py | 1 | 9278 | """
Profiles are templates used in other parts of the system to provide default
functionality for specific feature sets. For example, to enable DNS Relay on
an engine you must specify a DNSRelayProfile, which defines the common
settings (or sub-settings) for that feature.
A DNS Relay Profile allows multiple DNS-related mappings to be configured.
Example usage::
>>> from smc.elements.profiles import DNSRelayProfile
>>> profile = DNSRelayProfile('mynewprofile')
.. note:: If the DNSRelayProfile does not exist, it will automatically be
created when a DNS relay rule is added to the DNSRelayProfile instance.
Add a fixed domain answer rule::
>>> profile.fixed_domain_answer.add([('microsoft3.com', 'foo.com'), ('microsoft4.com',)])
>>> profile.fixed_domain_answer.all()
[{u'domain_name': u'microsoft3.com', u'translated_domain_name': u'foo.com'}, {u'domain_name': u'microsoft4.com'}]
Translate hostnames (not fqdn) to a specific IP address::
>>> profile.hostname_mapping.add([('hostname1,hostname2', '1.1.1.12')])
>>> profile.hostname_mapping.all()
[{u'hostnames': u'hostname1,hostname2', u'ipaddress': u'1.1.1.12'}]
Translate an IP address to another::
>>> profile.dns_answer_translation.add([('12.12.12.12', '172.18.1.20')])
>>> profile.dns_answer_translation.all()
[{u'translated_ipaddress': u'172.18.1.20', u'original_ipaddress': u'12.12.12.12'}]
Specify a DNS server to handle specific domains::
>>> profile.domain_specific_dns_server.add([('myfoo.com', '172.18.1.20')])
>>> profile.domain_specific_dns_server.all()
[{u'dns_server_addresses': u'172.18.1.20', u'domain_name': u'myfoo.com'}]
"""
from smc.base.model import Element, ElementCreator
from smc.api.exceptions import ElementNotFound
from smc.base.util import element_resolver
class DNSRule(object):
"""
DNSRule is the parent class for all DNS relay rules.
"""
__slots__ = ('profile')
def __init__(self, profile):
self.profile = profile
def add(self, instance, answers):
key, left, right = instance._attr
json = [dict(zip([left, right], d))
for d in answers]
try:
self.profile.data[key].extend(json)
self.profile.update()
except ElementNotFound:
j = {'name': self.profile.name,
key: json}
return ElementCreator(self.profile.__class__, j)
def all(self):
"""
Return all entries
:rtype: list(dict)
"""
attribute = self._attr[0]
return self.profile.data.get(attribute, [])
class FixedDomainAnswer(DNSRule):
"""
Direct requests for specific domains to IPv4 addresses, IPv6
addresses, fully qualified domain names (FQDNs), or empty DNS replies
"""
_attr = ('fixed_domain_answer', 'domain_name', 'translated_domain_name')
def add(self, answers):
"""
Add a fixed domain answer. This should be a list of
two-tuples, the first entry is the domain name, and
the second is the translated domain value::
profile = DNSRelayProfile('dnsrules')
profile.fixed_domain_answer.add([
('microsoft.com', 'foo.com'), ('microsoft2.com',)])
:param answers: (domain_name, translated_domain_name)
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
        .. note:: translated_domain_name can be None, which will cause
the NGFW to return NXDomain for the specified domain.
"""
super(FixedDomainAnswer, self).add(self, answers)
class HostnameMapping(DNSRule):
"""
Statically map host names, aliases for host names, and unqualified
names (a host name without the domain suffix) to IPv4 or IPv6
addresses
"""
_attr = ('hostname_mapping', 'hostnames', 'ipaddress')
def add(self, answers):
"""
Map specific hostname to specified IP address. Provide a list
of two-tuples. The first entry is the hostname/s to translate
(you can provide multiple comma separated values). The second
entry should be the IP address to map the hostnames to::
profile = DNSRelayProfile('dnsrules')
profile.hostname_mapping.add([('hostname1,hostname2', '1.1.1.1')])
:param answers: (hostnames, ipaddress), hostnames can be a
comma separated list.
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
super(HostnameMapping, self).add(self, answers)
class DomainSpecificDNSServer(DNSRule):
"""
Forward DNS requests to different DNS servers based on
the requested domain.
"""
_attr = ('domain_specific_dns_server', 'domain_name', 'dns_server_addresses')
def add(self, answers):
"""
Relay specific domains to a specified DNS server. Provide
a list of two-tuple with first entry the domain name to relay
for. The second entry is the DNS server that should handle the
query::
profile = DNSRelayProfile('dnsrules')
profile.domain_specific_dns_server.add([('myfoo.com', '172.18.1.20')])
:param answers: (domain_name, dns_server_addresses), dns server
addresses can be a comma separated string
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
super(DomainSpecificDNSServer, self).add(self, answers)
class DNSAnswerTranslation(DNSRule):
"""
Map IPv4 addresses resolved by external DNS servers to IPv4
addresses in the internal network.
"""
_attr = ('dns_answer_translation', 'original_ipaddress', 'translated_ipaddress')
def add(self, answers):
"""
Takes an IPv4 address and translates to a specified IPv4 value.
Provide a list of two-tuple with the first entry providing the
original address and second entry specifying the translated address::
profile = DNSRelayProfile('dnsrules')
profile.dns_answer_translation.add([('12.12.12.12', '172.18.1.20')])
:param answers: (original_ipaddress, translated_ipaddress)
:type answers: tuple[str, str]
:raises UpdateElementFailed: failure to add to SMC
:return: None
"""
super(DNSAnswerTranslation, self).add(self, answers)
class DNSRelayProfile(Element):
"""
DNS Relay Settings specify a profile to handle how the engine will
interpret DNS queries. Stonesoft can act as a DNS relay, rewrite
DNS queries or redirect domains to the specified DNS servers.
"""
typeof = 'dns_relay_profile'
@property
def fixed_domain_answer(self):
"""
Add a fixed domain answer entry.
:rtype: FixedDomainAnswer
"""
return FixedDomainAnswer(self)
@property
def hostname_mapping(self):
"""
Add a hostname to IP mapping
:rtype: HostnameMapping
"""
return HostnameMapping(self)
@property
def domain_specific_dns_server(self):
"""
Add domain to DNS server mapping
:rtype: DomainSpecificDNSServer
"""
return DomainSpecificDNSServer(self)
@property
def dns_answer_translation(self):
"""
Add a DNS answer translation
:rtype: DNSAnswerTranslation
"""
return DNSAnswerTranslation(self)
class SNMPAgent(Element):
"""
Minimal implementation of SNMPAgent
"""
typeof = 'snmp_agent'
@classmethod
def create(cls, name, snmp_monitoring_contact=None,
snmp_monitoring_listening_port=161, snmp_version='v3',
comment=None):
json = {'boot': False,
'go_offline': False,
'go_online': False,
'hardware_alerts': False,
'name': name,
'policy_applied': False,
'shutdown': False,
'snmp_monitoring_contact': snmp_monitoring_contact,
'snmp_monitoring_listening_port': snmp_monitoring_listening_port,
'snmp_monitoring_user_name': [],
'snmp_trap_destination': [],
'snmp_user_name': [],
'snmp_version': snmp_version,
'user_login': False}
return ElementCreator(cls, json)
class SandboxService(Element):
typeof = 'sandbox_service'
@classmethod
def create(cls, name, sandbox_data_center, portal_username=None, comment=None):
"""
Create a Sandbox Service element
"""
json = {
'name': name,
'sandbox_data_center': element_resolver(sandbox_data_center),
'portal_username': portal_username if portal_username else '',
'comment': comment}
return ElementCreator(cls, json)
class SandboxDataCenter(Element):
typeof = 'sandbox_data_center'
| apache-2.0 | -6,040,503,220,421,455,000 | 32.861314 | 117 | 0.613171 | false |
unicef/rhizome | rhizome/api/resources/source_submission.py | 1 | 1345 | from rhizome.api.resources.base_model import BaseModelResource
from rhizome.models.document_models import SourceSubmission
class SourceSubmissionResource(BaseModelResource):
'''
**GET Request** Returns all SourceSubmissions unless an optional parameter is specified
- *Optional Parameters:*
'document_id': return only the source submissions with the specified document ids
- *Errors:*
if an incorrect document id is provided, returns an empty object list
'''
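    # Hypothetical usage (the exact URL prefix depends on how the API is
    # registered elsewhere in the project):
    #   GET .../source_submission/?document_id=7  -> submissions for document 7
    #   GET .../source_submission/?id=42          -> only the submission with id 42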
class Meta(BaseModelResource.Meta):
resource_name = 'source_submission'
object_class = SourceSubmission
# GET_params_required = ['document_id']
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
        ## fix this in the front end to request the resource in REST style ##
id_param = request.GET.get('id', None)
if id_param:
return self.get_object_list(request).filter(**{'id': id_param})
doc_filter = {'document_id': request.GET.get('document_id')}
return self.get_object_list(request).filter(**doc_filter)
| agpl-3.0 | 15,860,401,860,111,412 | 39.757576 | 93 | 0.657993 | false |
Eksmo/calibre | src/calibre/gui2/preferences/search_ui.py | 1 | 13356 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/gugu/w/calibre/src/calibre/gui2/preferences/search.ui'
#
# Created: Thu Jul 19 23:32:29 2012
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(670, 663)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.opt_search_as_you_type = QtGui.QCheckBox(Form)
self.opt_search_as_you_type.setObjectName(_fromUtf8("opt_search_as_you_type"))
self.gridLayout.addWidget(self.opt_search_as_you_type, 0, 0, 1, 1)
self.opt_use_primary_find_in_search = QtGui.QCheckBox(Form)
self.opt_use_primary_find_in_search.setObjectName(_fromUtf8("opt_use_primary_find_in_search"))
self.gridLayout.addWidget(self.opt_use_primary_find_in_search, 0, 1, 1, 1)
self.opt_highlight_search_matches = QtGui.QCheckBox(Form)
self.opt_highlight_search_matches.setObjectName(_fromUtf8("opt_highlight_search_matches"))
self.gridLayout.addWidget(self.opt_highlight_search_matches, 1, 0, 1, 2)
self.groupBox = QtGui.QGroupBox(Form)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 2)
self.opt_limit_search_columns = QtGui.QCheckBox(self.groupBox)
self.opt_limit_search_columns.setObjectName(_fromUtf8("opt_limit_search_columns"))
self.gridLayout_2.addWidget(self.opt_limit_search_columns, 1, 0, 1, 2)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 2, 0, 1, 1)
self.opt_limit_search_columns_to = EditWithComplete(self.groupBox)
self.opt_limit_search_columns_to.setObjectName(_fromUtf8("opt_limit_search_columns_to"))
self.gridLayout_2.addWidget(self.opt_limit_search_columns_to, 2, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setWordWrap(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 5, 0, 1, 2)
self.gridLayout.addWidget(self.groupBox, 4, 0, 1, 2)
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.l12 = QtGui.QHBoxLayout()
self.l12.setObjectName(_fromUtf8("l12"))
self.la10 = QtGui.QLabel(self.groupBox_2)
self.la10.setObjectName(_fromUtf8("la10"))
self.l12.addWidget(self.la10)
self.gst_names = QtGui.QComboBox(self.groupBox_2)
self.gst_names.setEditable(True)
self.gst_names.setMinimumContentsLength(10)
self.gst_names.setObjectName(_fromUtf8("gst_names"))
self.l12.addWidget(self.gst_names)
self.gst_delete_button = QtGui.QToolButton(self.groupBox_2)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("trash.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.gst_delete_button.setIcon(icon)
self.gst_delete_button.setObjectName(_fromUtf8("gst_delete_button"))
self.l12.addWidget(self.gst_delete_button)
self.gst_value = EditWithComplete(self.groupBox_2)
self.gst_value.setObjectName(_fromUtf8("gst_value"))
self.l12.addWidget(self.gst_value)
self.gst_save_button = QtGui.QToolButton(self.groupBox_2)
self.gst_save_button.setObjectName(_fromUtf8("gst_save_button"))
self.l12.addWidget(self.gst_save_button)
self.gridLayout_3.addLayout(self.l12, 0, 0, 1, 1)
self.gst_explanation = QtGui.QTextBrowser(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.gst_explanation.sizePolicy().hasHeightForWidth())
self.gst_explanation.setSizePolicy(sizePolicy)
self.gst_explanation.setObjectName(_fromUtf8("gst_explanation"))
self.gridLayout_3.addWidget(self.gst_explanation, 0, 1, 3, 1)
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setObjectName(_fromUtf8("hboxlayout"))
self.l11 = QtGui.QLabel(self.groupBox_2)
self.l11.setObjectName(_fromUtf8("l11"))
self.hboxlayout.addWidget(self.l11)
self.opt_grouped_search_make_user_categories = EditWithComplete(self.groupBox_2)
self.opt_grouped_search_make_user_categories.setObjectName(_fromUtf8("opt_grouped_search_make_user_categories"))
self.hboxlayout.addWidget(self.opt_grouped_search_make_user_categories)
self.gridLayout_3.addLayout(self.hboxlayout, 1, 0, 1, 1)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem, 2, 0, 1, 1)
self.gridLayout.addWidget(self.groupBox_2, 6, 0, 1, 2)
self.clear_history_button = QtGui.QPushButton(Form)
self.clear_history_button.setObjectName(_fromUtf8("clear_history_button"))
self.gridLayout.addWidget(self.clear_history_button, 5, 0, 1, 2)
self.groupBox22 = QtGui.QGroupBox(Form)
self.groupBox22.setObjectName(_fromUtf8("groupBox22"))
self.gridLayout_22 = QtGui.QGridLayout(self.groupBox22)
self.gridLayout_22.setObjectName(_fromUtf8("gridLayout_22"))
self.label1 = QtGui.QLabel(self.groupBox22)
self.label1.setWordWrap(True)
self.label1.setObjectName(_fromUtf8("label1"))
self.gridLayout_22.addWidget(self.label1, 0, 0, 1, 6)
self.label_221 = QtGui.QLabel(self.groupBox22)
self.label_221.setObjectName(_fromUtf8("label_221"))
self.gridLayout_22.addWidget(self.label_221, 1, 0, 1, 1)
self.similar_authors_search_key = QtGui.QComboBox(self.groupBox22)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.similar_authors_search_key.sizePolicy().hasHeightForWidth())
self.similar_authors_search_key.setSizePolicy(sizePolicy)
self.similar_authors_search_key.setObjectName(_fromUtf8("similar_authors_search_key"))
self.gridLayout_22.addWidget(self.similar_authors_search_key, 1, 1, 1, 1)
self.opt_similar_authors_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_authors_match_kind.setObjectName(_fromUtf8("opt_similar_authors_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_authors_match_kind, 1, 2, 1, 1)
self.label_222 = QtGui.QLabel(self.groupBox22)
self.label_222.setObjectName(_fromUtf8("label_222"))
self.gridLayout_22.addWidget(self.label_222, 1, 3, 1, 1)
self.similar_series_search_key = QtGui.QComboBox(self.groupBox22)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.similar_series_search_key.sizePolicy().hasHeightForWidth())
self.similar_series_search_key.setSizePolicy(sizePolicy)
self.similar_series_search_key.setObjectName(_fromUtf8("similar_series_search_key"))
self.gridLayout_22.addWidget(self.similar_series_search_key, 1, 4, 1, 1)
self.opt_similar_series_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_series_match_kind.setObjectName(_fromUtf8("opt_similar_series_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_series_match_kind, 1, 5, 1, 1)
self.label_223 = QtGui.QLabel(self.groupBox22)
self.label_223.setObjectName(_fromUtf8("label_223"))
self.gridLayout_22.addWidget(self.label_223, 2, 0, 1, 1)
self.similar_tags_search_key = QtGui.QComboBox(self.groupBox22)
self.similar_tags_search_key.setObjectName(_fromUtf8("similar_tags_search_key"))
self.gridLayout_22.addWidget(self.similar_tags_search_key, 2, 1, 1, 1)
self.opt_similar_tags_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_tags_match_kind.setObjectName(_fromUtf8("opt_similar_tags_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_tags_match_kind, 2, 2, 1, 1)
self.label_224 = QtGui.QLabel(self.groupBox22)
self.label_224.setObjectName(_fromUtf8("label_224"))
self.gridLayout_22.addWidget(self.label_224, 2, 3, 1, 1)
self.similar_publisher_search_key = QtGui.QComboBox(self.groupBox22)
self.similar_publisher_search_key.setObjectName(_fromUtf8("similar_publisher_search_key"))
self.gridLayout_22.addWidget(self.similar_publisher_search_key, 2, 4, 1, 1)
self.opt_similar_publisher_match_kind = QtGui.QComboBox(self.groupBox22)
self.opt_similar_publisher_match_kind.setObjectName(_fromUtf8("opt_similar_publisher_match_kind"))
self.gridLayout_22.addWidget(self.opt_similar_publisher_match_kind, 2, 5, 1, 1)
self.gridLayout.addWidget(self.groupBox22, 7, 0, 1, 2)
self.label_2.setBuddy(self.opt_limit_search_columns_to)
self.la10.setBuddy(self.gst_names)
self.l11.setBuddy(self.opt_grouped_search_make_user_categories)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_("Form"))
self.opt_search_as_you_type.setText(_("Search as you &type"))
self.opt_use_primary_find_in_search.setText(_("Unaccented characters match accented characters"))
self.opt_highlight_search_matches.setText(_("&Highlight search results instead of restricting the book list to the results"))
self.groupBox.setTitle(_("What to search by default"))
self.label.setText(_("When you enter a search term without a prefix, by default calibre will search all metadata for matches. For example, entering, \"asimov\" will search not just authors but title/tags/series/comments/etc. Use these options if you would like to change this behavior."))
self.opt_limit_search_columns.setText(_("&Limit the searched metadata"))
self.label_2.setText(_("&Columns that non-prefixed searches are limited to:"))
self.label_3.setText(_("Note that this option affects all searches, including saved searches and restrictions. Therefore, if you use this option, it is best to ensure that you always use prefixes in your saved searches. For example, use \"series:Foundation\" rather than just \"Foundation\" in a saved search"))
self.groupBox_2.setTitle(_("Grouped Search Terms"))
self.la10.setText(_("&Names:"))
self.gst_names.setToolTip(_("Contains the names of the currently-defined group search terms.\n"
"Create a new name by entering it into the empty box, then\n"
"pressing Save. Rename a search term by selecting it then\n"
"changing the name and pressing Save. Change the value of\n"
"a search term by changing the value box then pressing Save."))
self.gst_delete_button.setToolTip(_("Delete the current search term"))
self.gst_delete_button.setText(_("..."))
self.gst_save_button.setToolTip(_("Save the current search term. You can rename a search term by\n"
"changing the name then pressing Save. You can change the value\n"
"of a search term by changing the value box then pressing Save."))
self.gst_save_button.setText(_("&Save"))
self.l11.setText(_("Make &user categories from:"))
self.opt_grouped_search_make_user_categories.setToolTip(_("Enter the names of any grouped search terms you wish\n"
"to be shown as user categories"))
self.clear_history_button.setToolTip(_("Clear search histories from all over calibre. Including the book list, e-book viewer, fetch news dialog, etc."))
self.clear_history_button.setText(_("Clear search &histories"))
self.groupBox22.setTitle(_("What to search when searching similar books"))
self.label1.setText(_("<p>When you search for similar books by right clicking the\n"
" book and selecting \"Similar books...\",\n"
" calibre constructs a search using the column lookup names specified below.\n"
" By changing the lookup name to a grouped search term you can\n"
" search multiple columns at once.</p>"))
self.label_221.setText(_("Similar authors: "))
self.label_222.setText(_("Similar series: "))
self.label_223.setText(_("Similar tags: "))
self.label_224.setText(_("Similar publishers: "))
from calibre.gui2.complete2 import EditWithComplete
| gpl-3.0 | 2,683,885,192,424,116,700 | 63.834951 | 320 | 0.699386 | false |
RobRuana/sideboard | sideboard/internal/logging.py | 1 | 1819 | from __future__ import unicode_literals, absolute_import
import os
import logging.config
import logging_unterpolation
from sideboard.config import config
class IndentMultilinesLogFormatter(logging.Formatter):
"""
Provide a formatter (unused by default) which adds indentation to messages
which are split across multiple lines.
"""
def format(self, record):
s = super(IndentMultilinesLogFormatter, self).format(record)
        # indent every line after the first so that multi-line messages are easier for external log programs to parse
s = s.rstrip('\n').replace('\n', '\n ')
return s
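# Minimal sketch of opting in to IndentMultilinesLogFormatter via dictConfig.
# It mirrors the 'indent_multiline' entry that _configure_logging() below builds
# from the configured default formatter; the handler and format string used here
# are illustrative assumptions rather than Sideboard defaults, and this constant
# is not referenced anywhere else.
EXAMPLE_INDENT_LOGGING_CONFIG = {
    'version': 1,
    'formatters': {
        'indent_multiline': {
            '()': IndentMultilinesLogFormatter,
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
        },
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'indent_multiline',
        },
    },
    'root': {
        'level': 'INFO',
        'handlers': ['console'],
    },
}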
def _configure_logging():
logging_unterpolation.patch_logging()
fname = '/etc/sideboard/logging.cfg'
if os.path.exists(fname):
logging.config.fileConfig(fname, disable_existing_loggers=True)
else:
# ConfigObj doesn't support interpolation escaping, so we manually work around it here
formatters = config['formatters'].dict()
for formatter in formatters.values():
formatter['format'] = formatter['format'].replace('$$', '%')
formatter['datefmt'] = formatter['datefmt'].replace('$$', '%') or None
formatters['indent_multiline'] = {
'()': IndentMultilinesLogFormatter,
'format': formatters['default']['format']
}
logging.config.dictConfig({
'version': 1,
'root': {
'level': config['loggers']['root'],
'handlers': config['handlers'].dict().keys()
},
'loggers': {
name: {'level': level}
for name, level in config['loggers'].items() if name != 'root'
},
'handlers': config['handlers'].dict(),
'formatters': formatters
})
| bsd-3-clause | -6,979,212,933,264,625,000 | 35.38 | 106 | 0.59978 | false |
jolyonb/edx-platform | openedx/core/djangoapps/user_api/accounts/tests/test_image_helpers.py | 1 | 2916 | """
Tests for helpers.py
"""
from __future__ import absolute_import
import datetime
import hashlib
from django.test import TestCase
from mock import patch
from pytz import UTC
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import UserFactory
from ..image_helpers import get_profile_image_urls_for_user
TEST_SIZES = {'full': 50, 'small': 10}
TEST_PROFILE_IMAGE_UPLOAD_DT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=UTC)
@patch.dict('django.conf.settings.PROFILE_IMAGE_SIZES_MAP', TEST_SIZES, clear=True)
@skip_unless_lms
class ProfileImageUrlTestCase(TestCase):
"""
Tests for profile image URL generation helpers.
"""
def setUp(self):
super(ProfileImageUrlTestCase, self).setUp()
self.user = UserFactory()
# Ensure that parental controls don't apply to this user
self.user.profile.year_of_birth = 1980
self.user.profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOAD_DT
self.user.profile.save()
def verify_url(self, actual_url, expected_name, expected_pixels, expected_version):
"""
Verify correct url structure.
"""
self.assertEqual(
actual_url,
'http://example-storage.com/profile-images/{name}_{size}.jpg?v={version}'.format(
name=expected_name, size=expected_pixels, version=expected_version
)
)
def verify_default_url(self, actual_url, expected_pixels):
"""
Verify correct url structure for a default profile image.
"""
self.assertEqual(
actual_url,
'/static/default_{size}.png'.format(size=expected_pixels)
)
def verify_urls(self, actual_urls, expected_name, is_default=False):
"""
Verify correct url dictionary structure.
"""
self.assertEqual(set(TEST_SIZES.keys()), set(actual_urls.keys()))
for size_display_name, url in actual_urls.items():
if is_default:
self.verify_default_url(url, TEST_SIZES[size_display_name])
else:
self.verify_url(
url, expected_name, TEST_SIZES[size_display_name], TEST_PROFILE_IMAGE_UPLOAD_DT.strftime("%s")
)
def test_get_profile_image_urls(self):
"""
Tests `get_profile_image_urls_for_user`
"""
self.user.profile.profile_image_uploaded_at = TEST_PROFILE_IMAGE_UPLOAD_DT
self.user.profile.save()
expected_name = hashlib.md5('secret' + self.user.username).hexdigest()
actual_urls = get_profile_image_urls_for_user(self.user)
self.verify_urls(actual_urls, expected_name, is_default=False)
self.user.profile.profile_image_uploaded_at = None
self.user.profile.save()
self.verify_urls(get_profile_image_urls_for_user(self.user), 'default', is_default=True)
| agpl-3.0 | -586,315,608,555,252,000 | 34.560976 | 114 | 0.641632 | false |
splav/servo | tests/wpt/web-platform-tests/tools/wpt/run.py | 1 | 30731 | import argparse
import os
import platform
import sys
from distutils.spawn import find_executable
from six.moves import input
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.insert(0, os.path.abspath(os.path.join(wpt_root, "tools")))
from . import browser, install, testfiles, utils, virtualenv
from ..serve import serve
logger = None
class WptrunError(Exception):
pass
class WptrunnerHelpAction(argparse.Action):
def __init__(self,
option_strings,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help=None):
super(WptrunnerHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
from wptrunner import wptcommandline
wptparser = wptcommandline.create_parser()
wptparser.usage = parser.usage
wptparser.print_help()
parser.exit()
def create_parser():
from wptrunner import wptcommandline
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("product", action="store",
help="Browser to run tests in")
parser.add_argument("--affected", action="store", default=None,
help="Run affected tests since revish")
parser.add_argument("--yes", "-y", dest="prompt", action="store_false", default=True,
help="Don't prompt before installing components")
parser.add_argument("--install-browser", action="store_true",
help="Install the browser from the release channel specified by --channel "
"(or the nightly channel by default).")
parser.add_argument("--channel", action="store",
choices=install.channel_by_name.keys(),
default=None, help='Name of browser release channel. '
'"stable" and "release" are synonyms for the latest browser stable '
'release, "nightly", "dev", "experimental", and "preview" are all '
'synonyms for the latest available development release. (For WebDriver '
'installs, we attempt to select an appropriate, compatible version for '
'the latest browser release on the selected channel.) '
'This flag overrides --browser-channel.')
parser._add_container_actions(wptcommandline.create_parser())
return parser
def exit(msg=None):
if msg:
logger.critical(msg)
sys.exit(1)
else:
sys.exit(0)
def args_general(kwargs):
kwargs.set_if_none("tests_root", wpt_root)
kwargs.set_if_none("metadata_root", wpt_root)
kwargs.set_if_none("manifest_update", True)
kwargs.set_if_none("manifest_download", True)
if kwargs["ssl_type"] in (None, "pregenerated"):
cert_root = os.path.join(wpt_root, "tools", "certs")
if kwargs["ca_cert_path"] is None:
kwargs["ca_cert_path"] = os.path.join(cert_root, "cacert.pem")
if kwargs["host_key_path"] is None:
kwargs["host_key_path"] = os.path.join(cert_root, "web-platform.test.key")
if kwargs["host_cert_path"] is None:
kwargs["host_cert_path"] = os.path.join(cert_root, "web-platform.test.pem")
elif kwargs["ssl_type"] == "openssl":
if not find_executable(kwargs["openssl_binary"]):
            if platform.uname()[0] == "Windows":
raise WptrunError("""OpenSSL binary not found. If you need HTTPS tests, install OpenSSL from
https://slproweb.com/products/Win32OpenSSL.html
Ensure that libraries are added to /bin, and add the resulting bin directory to
your PATH.
Otherwise run with --ssl-type=none""")
else:
raise WptrunError("""OpenSSL not found. If you don't need HTTPS support run with --ssl-type=none,
otherwise install OpenSSL and ensure that it's on your $PATH.""")
def check_environ(product):
if product not in ("android_weblayer", "android_webview", "chrome", "chrome_android", "firefox", "firefox_android", "servo"):
config_builder = serve.build_config(os.path.join(wpt_root, "config.json"))
# Override the ports to avoid looking for free ports
config_builder.ssl = {"type": "none"}
config_builder.ports = {"http": [8000]}
is_windows = platform.uname()[0] == "Windows"
with config_builder as config:
expected_hosts = set(config.domains_set)
if is_windows:
expected_hosts.update(config.not_domains_set)
missing_hosts = set(expected_hosts)
if is_windows:
hosts_path = r"%s\System32\drivers\etc\hosts" % os.environ.get("SystemRoot", r"C:\Windows")
else:
hosts_path = "/etc/hosts"
if os.path.abspath(os.curdir) == wpt_root:
wpt_path = "wpt"
else:
wpt_path = os.path.join(wpt_root, "wpt")
with open(hosts_path, "r") as f:
for line in f:
line = line.split("#", 1)[0].strip()
parts = line.split()
hosts = parts[1:]
for host in hosts:
missing_hosts.discard(host)
if missing_hosts:
if is_windows:
message = """Missing hosts file configuration. Run
python %s make-hosts-file | Out-File %s -Encoding ascii -Append
in PowerShell with Administrator privileges.""" % (wpt_path, hosts_path)
else:
message = """Missing hosts file configuration. Run
%s make-hosts-file | sudo tee -a %s""" % ("./wpt" if wpt_path == "wpt" else wpt_path,
hosts_path)
raise WptrunError(message)
class BrowserSetup(object):
name = None
browser_cls = None
def __init__(self, venv, prompt=True):
self.browser = self.browser_cls(logger)
self.venv = venv
self.prompt = prompt
def prompt_install(self, component):
if not self.prompt:
return True
while True:
resp = input("Download and install %s [Y/n]? " % component).strip().lower()
if not resp or resp == "y":
return True
elif resp == "n":
return False
def install(self, channel=None):
if self.prompt_install(self.name):
return self.browser.install(self.venv.path, channel)
def install_requirements(self):
if not self.venv.skip_virtualenv_setup:
self.venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", self.browser.requirements))
def setup(self, kwargs):
self.setup_kwargs(kwargs)
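# Sketch of how a product hooks in (an assumed, illustrative subclass mirroring
# the real ones below, kept as a comment because browser.MyBrowser does not
# exist): a browser defines a BrowserSetup subclass and registers it in the
# product_setup mapping near the end of this module.
#
#   class MyBrowser(BrowserSetup):
#       name = "mybrowser"
#       browser_cls = browser.MyBrowser  # hypothetical entry in tools/wpt/browser.py
#
#       def setup_kwargs(self, kwargs):
#           if kwargs["webdriver_binary"] is None:
#               kwargs["webdriver_binary"] = self.browser.find_webdriver()
#
#   product_setup["mybrowser"] = MyBrowser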
def safe_unsetenv(env_var):
"""Safely remove an environment variable.
    Python 3 does not support os.unsetenv on Windows for Python < 3.9, so instead
    we remove the variable directly from os.environ.
"""
try:
del os.environ[env_var]
except KeyError:
pass
class Firefox(BrowserSetup):
name = "firefox"
browser_cls = browser.Firefox
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
if kwargs["browser_channel"] is None:
kwargs["browser_channel"] = "nightly"
logger.info("No browser channel specified. Running nightly instead.")
binary = self.browser.find_binary(self.venv.path,
kwargs["browser_channel"])
if binary is None:
raise WptrunError("""Firefox binary not found on $PATH.
Install Firefox or use --binary to set the binary path""")
kwargs["binary"] = binary
if kwargs["certutil_binary"] is None and kwargs["ssl_type"] != "none":
certutil = self.browser.find_certutil()
if certutil is None:
# Can't download this for now because it's missing the libnss3 library
logger.info("""Can't find certutil, certificates will not be checked.
Consider installing certutil via your OS package manager or directly.""")
else:
logger.info("Using certutil %s" % certutil)
kwargs["certutil_binary"] = certutil
if kwargs["webdriver_binary"] is None and "wdspec" in kwargs["test_types"]:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("geckodriver")
if install:
logger.info("Downloading geckodriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
logger.info("Unable to find or install geckodriver, skipping wdspec tests")
kwargs["test_types"].remove("wdspec")
if kwargs["prefs_root"] is None:
prefs_root = self.browser.install_prefs(kwargs["binary"],
self.venv.path,
channel=kwargs["browser_channel"])
kwargs["prefs_root"] = prefs_root
if kwargs["headless"] is None:
kwargs["headless"] = True
logger.info("Running in headless mode, pass --no-headless to disable")
# Turn off Firefox WebRTC ICE logging on WPT (turned on by mozrunner)
safe_unsetenv('R_LOG_LEVEL')
safe_unsetenv('R_LOG_DESTINATION')
safe_unsetenv('R_LOG_VERBOSE')
# Allow WebRTC tests to call getUserMedia.
kwargs["extra_prefs"].append("media.navigator.streams.fake=true")
class FirefoxAndroid(BrowserSetup):
name = "firefox_android"
browser_cls = browser.FirefoxAndroid
def install(self, channel):
# The install needs to happen in setup so that we have access to all the kwargs
self._install_browser = True
return None
def setup_kwargs(self, kwargs):
from . import android
import mozdevice
# We don't support multiple channels for android yet
if kwargs["browser_channel"] is None:
kwargs["browser_channel"] = "nightly"
if kwargs["prefs_root"] is None:
prefs_root = self.browser.install_prefs(kwargs["binary"],
self.venv.path,
channel=kwargs["browser_channel"])
kwargs["prefs_root"] = prefs_root
if kwargs["package_name"] is None:
kwargs["package_name"] = "org.mozilla.geckoview.test"
app = kwargs["package_name"]
if kwargs["device_serial"] is None:
kwargs["device_serial"] = "emulator-5554"
# We're running on an emulator so ensure that's set up
if kwargs["device_serial"].startswith("emulator-"):
emulator = android.install(logger, reinstall=False, no_prompt=not self.prompt)
android.start(logger, emulator=emulator, reinstall=False)
install = False
if hasattr(self, "_install_browser"):
if self.prompt_install("geckoview-test"):
install = True
apk_path = self.browser.install(self.venv.path,
channel=kwargs["browser_channel"])
if "ADB_PATH" not in os.environ:
adb_path = os.path.join(android.get_sdk_path(None),
"platform-tools",
"adb")
os.environ["ADB_PATH"] = adb_path
adb_path = os.environ["ADB_PATH"]
device = mozdevice.ADBDevice(adb=adb_path,
device=kwargs["device_serial"])
if install:
device.uninstall_app(app)
device.install_app(apk_path)
elif not device.is_app_installed(app):
raise WptrunError("app %s not installed on device %s" %
(app, kwargs["device_serial"]))
class Chrome(BrowserSetup):
name = "chrome"
browser_cls = browser.Chrome
def setup_kwargs(self, kwargs):
browser_channel = kwargs["browser_channel"]
if kwargs["binary"] is None:
binary = self.browser.find_binary(channel=browser_channel)
if binary:
kwargs["binary"] = binary
else:
raise WptrunError("Unable to locate Chrome binary")
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
browser_binary=kwargs["binary"],
)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
if browser_channel in ("dev", "canary"):
logger.info("Automatically turning on experimental features for Chrome Dev/Canary")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
# HACK(Hexcles): work around https://github.com/web-platform-tests/wpt/issues/16448
kwargs["webdriver_args"].append("--disable-build-check")
if os.getenv("TASKCLUSTER_ROOT_URL"):
# We are on Taskcluster, where our Docker container does not have
# enough capabilities to run Chrome with sandboxing. (gh-20133)
kwargs["binary_args"].append("--no-sandbox")
class ChromeAndroid(BrowserSetup):
name = "chrome_android"
browser_cls = browser.ChromeAndroid
def setup_kwargs(self, kwargs):
if kwargs.get("device_serial"):
self.browser.device_serial = kwargs["device_serial"]
browser_channel = kwargs["browser_channel"]
if kwargs["package_name"] is None:
kwargs["package_name"] = self.browser.find_binary(
channel=browser_channel)
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(
dest=self.venv.bin_path,
browser_binary=kwargs["package_name"],
)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
if browser_channel in ("dev", "canary"):
logger.info("Automatically turning on experimental features for Chrome Dev/Canary")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
# HACK(Hexcles): work around https://github.com/web-platform-tests/wpt/issues/16448
kwargs["webdriver_args"].append("--disable-build-check")
class ChromeiOS(BrowserSetup):
name = "chrome_ios"
browser_cls = browser.ChromeiOS
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
raise WptrunError("Unable to locate or install chromedriver binary")
class AndroidWeblayer(BrowserSetup):
name = "android_weblayer"
browser_cls = browser.AndroidWeblayer
def setup_kwargs(self, kwargs):
if kwargs.get("device_serial"):
self.browser.device_serial = kwargs["device_serial"]
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
class AndroidWebview(BrowserSetup):
name = "android_webview"
browser_cls = browser.AndroidWebview
def setup_kwargs(self, kwargs):
if kwargs.get("device_serial"):
self.browser.device_serial = kwargs["device_serial"]
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("chromedriver")
if install:
logger.info("Downloading chromedriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install chromedriver binary")
class Opera(BrowserSetup):
name = "opera"
browser_cls = browser.Opera
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
install = self.prompt_install("operadriver")
if install:
logger.info("Downloading operadriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install operadriver binary")
class EdgeChromium(BrowserSetup):
name = "MicrosoftEdge"
browser_cls = browser.EdgeChromium
def setup_kwargs(self, kwargs):
browser_channel = kwargs["browser_channel"]
if kwargs["binary"] is None:
binary = self.browser.find_binary(channel=browser_channel)
if binary:
kwargs["binary"] = binary
else:
raise WptrunError("Unable to locate Edge binary")
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
# Install browser if none are found or if it's found in venv path
if webdriver_binary is None or webdriver_binary in self.venv.bin_path:
install = self.prompt_install("msedgedriver")
if install:
logger.info("Downloading msedgedriver")
webdriver_binary = self.browser.install_webdriver(dest=self.venv.bin_path, channel=browser_channel)
else:
logger.info("Using webdriver binary %s" % webdriver_binary)
if webdriver_binary:
kwargs["webdriver_binary"] = webdriver_binary
else:
raise WptrunError("Unable to locate or install msedgedriver binary")
if browser_channel in ("dev", "canary"):
logger.info("Automatically turning on experimental features for Edge Dev/Canary")
kwargs["binary_args"].append("--enable-experimental-web-platform-features")
class Edge(BrowserSetup):
name = "edge"
browser_cls = browser.Edge
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
version to download. Please go to the following URL and install the correct
version for your Edge/Windows release somewhere on the %PATH%:
https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
""")
kwargs["webdriver_binary"] = webdriver_binary
class EdgeWebDriver(Edge):
name = "edge_webdriver"
browser_cls = browser.EdgeWebDriver
class InternetExplorer(BrowserSetup):
name = "ie"
browser_cls = browser.InternetExplorer
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("""Unable to find WebDriver and we aren't yet clever enough to work out which
version to download. Please go to the following URL and install the driver for Internet Explorer
somewhere on the %PATH%:
https://selenium-release.storage.googleapis.com/index.html
""")
kwargs["webdriver_binary"] = webdriver_binary
class Safari(BrowserSetup):
name = "safari"
browser_cls = browser.Safari
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver(channel=kwargs["browser_channel"])
if webdriver_binary is None:
raise WptrunError("Unable to locate safaridriver binary")
kwargs["webdriver_binary"] = webdriver_binary
class Sauce(BrowserSetup):
name = "sauce"
browser_cls = browser.Sauce
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["sauce_browser"] is None:
raise WptrunError("Missing required argument --sauce-browser")
if kwargs["sauce_version"] is None:
raise WptrunError("Missing required argument --sauce-version")
kwargs["test_types"] = ["testharness", "reftest"]
class Servo(BrowserSetup):
name = "servo"
browser_cls = browser.Servo
def install(self, channel=None):
if self.prompt_install(self.name):
return self.browser.install(self.venv.path)
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary(self.venv.path, None)
if binary is None:
raise WptrunError("Unable to find servo binary in PATH")
kwargs["binary"] = binary
class ServoWebDriver(Servo):
name = "servodriver"
browser_cls = browser.ServoWebDriver
class WebKit(BrowserSetup):
name = "webkit"
browser_cls = browser.WebKit
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
pass
class WebKitGTKMiniBrowser(BrowserSetup):
name = "webkitgtk_minibrowser"
browser_cls = browser.WebKitGTKMiniBrowser
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary(channel=kwargs["browser_channel"])
if binary is None:
raise WptrunError("Unable to find MiniBrowser binary")
kwargs["binary"] = binary
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver(channel=kwargs["browser_channel"])
if webdriver_binary is None:
raise WptrunError("Unable to find WebKitWebDriver in PATH")
kwargs["webdriver_binary"] = webdriver_binary
class Epiphany(BrowserSetup):
name = "epiphany"
browser_cls = browser.Epiphany
def install(self, channel=None):
raise NotImplementedError
def setup_kwargs(self, kwargs):
if kwargs["binary"] is None:
binary = self.browser.find_binary()
if binary is None:
raise WptrunError("Unable to find epiphany in PATH")
kwargs["binary"] = binary
if kwargs["webdriver_binary"] is None:
webdriver_binary = self.browser.find_webdriver()
if webdriver_binary is None:
raise WptrunError("Unable to find WebKitWebDriver in PATH")
kwargs["webdriver_binary"] = webdriver_binary
product_setup = {
"android_weblayer": AndroidWeblayer,
"android_webview": AndroidWebview,
"firefox": Firefox,
"firefox_android": FirefoxAndroid,
"chrome": Chrome,
"chrome_android": ChromeAndroid,
"chrome_ios": ChromeiOS,
"edgechromium": EdgeChromium,
"edge": Edge,
"edge_webdriver": EdgeWebDriver,
"ie": InternetExplorer,
"safari": Safari,
"servo": Servo,
"servodriver": ServoWebDriver,
"sauce": Sauce,
"opera": Opera,
"webkit": WebKit,
"webkitgtk_minibrowser": WebKitGTKMiniBrowser,
"epiphany": Epiphany,
}
def setup_logging(kwargs, default_config=None, formatter_defaults=None):
import mozlog
from wptrunner import wptrunner
global logger
# Use the grouped formatter by default where mozlog 3.9+ is installed
if default_config is None:
if hasattr(mozlog.formatters, "GroupingFormatter"):
default_formatter = "grouped"
else:
default_formatter = "mach"
default_config = {default_formatter: sys.stdout}
wptrunner.setup_logging(kwargs, default_config, formatter_defaults=formatter_defaults)
logger = wptrunner.logger
return logger
def setup_wptrunner(venv, prompt=True, install_browser=False, **kwargs):
from wptrunner import wptcommandline
from six import iteritems
kwargs = utils.Kwargs(iteritems(kwargs))
kwargs["product"] = kwargs["product"].replace("-", "_")
check_environ(kwargs["product"])
args_general(kwargs)
if kwargs["product"] not in product_setup:
raise WptrunError("Unsupported product %s" % kwargs["product"])
setup_cls = product_setup[kwargs["product"]](venv, prompt)
setup_cls.install_requirements()
affected_revish = kwargs.pop("affected", None)
if affected_revish is not None:
# TODO: Consolidate with `./wpt tests-affected --ignore-rules`:
# https://github.com/web-platform-tests/wpt/issues/14560
files_changed, _ = testfiles.files_changed(
affected_revish,
ignore_rules=["resources/testharness*"],
include_uncommitted=True, include_new=True)
# TODO: Perhaps use wptrunner.testloader.ManifestLoader here
# and remove the manifest-related code from testfiles.
# https://github.com/web-platform-tests/wpt/issues/14421
tests_changed, tests_affected = testfiles.affected_testfiles(
files_changed, manifest_path=kwargs.get("manifest_path"), manifest_update=kwargs["manifest_update"])
test_list = tests_changed | tests_affected
logger.info("Identified %s affected tests" % len(test_list))
test_list = [os.path.relpath(item, wpt_root) for item in test_list]
kwargs["test_list"] += test_list
kwargs["default_exclude"] = True
if install_browser and not kwargs["channel"]:
logger.info("--install-browser is given but --channel is not set, default to nightly channel")
kwargs["channel"] = "nightly"
if kwargs["channel"]:
channel = install.get_channel(kwargs["product"], kwargs["channel"])
if channel is not None:
if channel != kwargs["channel"]:
logger.info("Interpreting channel '%s' as '%s'" % (kwargs["channel"],
channel))
kwargs["browser_channel"] = channel
else:
logger.info("Valid channels for %s not known; using argument unmodified" % kwargs["product"])
kwargs["browser_channel"] = kwargs["channel"]
del kwargs["channel"]
if install_browser:
logger.info("Installing browser")
kwargs["binary"] = setup_cls.install(channel=channel)
setup_cls.setup(kwargs)
wptcommandline.check_args(kwargs)
wptrunner_path = os.path.join(wpt_root, "tools", "wptrunner")
if not venv.skip_virtualenv_setup:
venv.install_requirements(os.path.join(wptrunner_path, "requirements.txt"))
# Only update browser_version if it was not given as a command line
# argument, so that it can be overridden on the command line.
if not kwargs["browser_version"]:
kwargs["browser_version"] = setup_cls.browser.version(
binary=kwargs.get("binary") or kwargs.get("package_name"),
webdriver_binary=kwargs.get("webdriver_binary"),
)
return kwargs
def run(venv, **kwargs):
setup_logging(kwargs)
# Remove arguments that aren't passed to wptrunner
prompt = kwargs.pop("prompt", True)
install_browser = kwargs.pop("install_browser", False)
kwargs = setup_wptrunner(venv,
prompt=prompt,
install_browser=install_browser,
**kwargs)
rv = run_single(venv, **kwargs) > 0
return rv
def run_single(venv, **kwargs):
from wptrunner import wptrunner
return wptrunner.start(**kwargs)
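# Illustrative command lines (the test paths and channels are examples only):
# these entry points are normally reached through the ./wpt front end, e.g.
#
#   ./wpt run firefox dom/historical.html
#   ./wpt run --channel dev --install-browser chrome css/css-grid
#
# The parsed arguments become the kwargs consumed by setup_wptrunner()/run() above.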
def main():
try:
parser = create_parser()
args = parser.parse_args()
venv = virtualenv.Virtualenv(os.path.join(wpt_root, "_venv_%s") % platform.uname()[0])
venv.start()
venv.install_requirements(os.path.join(wpt_root, "tools", "wptrunner", "requirements.txt"))
venv.install("requests")
        return run(venv, **vars(args))
except WptrunError as e:
exit(e.message)
if __name__ == "__main__":
import pdb
from tools import localpaths # noqa: F401
try:
main()
except Exception:
pdb.post_mortem()
| mpl-2.0 | -6,961,727,314,799,412,000 | 35.759569 | 129 | 0.605057 | false |
MD-Studio/MDStudio | mdstudio/mdstudio/tests/db/impl/test_mongo_database_wrapper.py | 1 | 74367 | # coding=utf-8
import datetime
import mongomock
import pytz
import twisted
from bson import ObjectId
from faker import Faker
from mock import mock, call
from twisted.internet import reactor
from mdstudio.db.cursor import Cursor, query
from mdstudio.db.exception import DatabaseException
from mdstudio.db.fields import Fields
from mdstudio.db.impl.mongo_client_wrapper import MongoClientWrapper
from mdstudio.service.model import Model
from mdstudio.db.sort_mode import SortMode
from mdstudio.deferred.chainable import test_chainable
from mdstudio.unittest.db import DBTestCase
twisted.internet.base.DelayedCall.debug = True
# noinspection PyUnresolvedReferences
class TestMongoDatabaseWrapper(DBTestCase):
faker = Faker()
def setUp(self):
self.db = MongoClientWrapper("localhost", 27127).get_database('users~userNameDatabase')
self.claims = {
'connectionType': 'user',
'username': 'userNameDatabase'
}
self.d = Model(self.db, 'test_collection')
if not reactor.getThreadPool().started:
reactor.getThreadPool().start()
def test_prepare_sortmode_asc(self):
sort = ('test', SortMode.Asc)
sort = self.db._prepare_sortmode(sort)
self.assertEqual(sort, [('test', 1)])
def test_prepare_sortmode_desc(self):
sort = ('test', SortMode.Desc)
sort = self.db._prepare_sortmode(sort)
self.assertEqual(sort, [('test', -1)])
def test_prepare_sortmode_asc2(self):
sort = ('test', "asc")
sort = self.db._prepare_sortmode(sort)
self.assertEqual(sort, [('test', 1)])
def test_prepare_sortmode_desc2(self):
sort = ('test', "desc")
sort = self.db._prepare_sortmode(sort)
self.assertEqual(sort, [('test', -1)])
def test_prepare_sortmode_list(self):
sort = [
('test', SortMode.Desc),
('test2', SortMode.Asc),
]
sort = self.db._prepare_sortmode(sort)
self.assertEqual(sort, [
('test', -1),
('test2', 1)
])
def test_prepare_for_json(self):
document = {
'o': {
'date': [datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
datetime.datetime(2017, 10, 26, 9, 15, tzinfo=pytz.utc)],
'f': '2017-10-26T09:15:00+00:00'
}
}
self.db._prepare_for_json(document)
self.assertEqual(document, {
'o': {
'date': [datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
datetime.datetime(2017, 10, 26, 9, 15, tzinfo=pytz.utc)],
'f': '2017-10-26T09:15:00+00:00'
}
})
def test_prepare_for_json_id(self):
document = {
'_id': 1000,
'o': {
'date': [datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
datetime.datetime(2017, 10, 26, 9, 15, tzinfo=pytz.utc)],
'f': '2017-10-26T09:15:00+00:00'
}
}
self.db._prepare_for_json(document)
self.assertEqual(document, {
'_id': '1000',
'o': {
'date': [datetime.datetime(2017, 10, 26, 9, 16, tzinfo=pytz.utc),
datetime.datetime(2017, 10, 26, 9, 15, tzinfo=pytz.utc)],
'f': '2017-10-26T09:15:00+00:00'
}
})
def test_prepare_for_json_none(self):
document = None
self.db._prepare_for_json(document)
self.assertEqual(document, None)
def test_prepare_for_json_int(self):
document = 2
self.db._prepare_for_json(document)
self.assertEqual(document, 2)
def test_prepare_for_json_int_list(self):
document = [2, 3, 4]
self.db._prepare_for_json(document)
self.assertEqual(document, [2, 3, 4])
def test_prepare_for_mongo(self):
document = {
'o': {
'f': '2017-10-26T09:15:00+00:00'
}
}
result = self.db._prepare_for_mongo(document)
self.assertIsNot(document, result)
self.assertEqual(result, {
'o': {
'f': '2017-10-26T09:15:00+00:00'
}
})
def test_prepare_for_mongo_id(self):
document = {
'_id': '0123456789ab0123456789ab',
'o': {
'f': '2017-10-26T09:15:00+00:00'
}
}
result = self.db._prepare_for_mongo(document)
self.assertIsNot(document, result)
self.assertEqual(result, {
'_id': ObjectId('0123456789ab0123456789ab'),
'o': {
'f': '2017-10-26T09:15:00+00:00'
}
})
def test_prepare_for_mongo_none(self):
document = None
result = self.db._prepare_for_mongo(document)
self.assertEqual(result, None)
def test_get_collection_dict(self):
self.db._logger = mock.MagicMock()
collection = {
'name': 'test_collection'
}
self.assertEqual(self.db._get_collection(collection), None)
self.db._logger.info.assert_not_called()
def test_get_collection_dict_create(self):
self.db._logger = mock.MagicMock()
collection = {
'name': 'test_collection'
}
self.assertIsInstance(self.db._get_collection(collection, create=True), mongomock.collection.Collection)
self.db._logger.info.assert_called_once_with('Creating collection {collection} in {database}',
collection='test_collection', database='users~userNameDatabase')
def test_get_collection_str(self):
self.db._logger = mock.MagicMock()
collection = 'test_collection'
self.assertEqual(self.db._get_collection(collection), None)
self.db._logger.info.assert_not_called()
def test_get_collection_str_create(self):
self.db._logger = mock.MagicMock()
collection = 'test_collection'
self.assertIsInstance(self.db._get_collection(collection, create=True), mongomock.collection.Collection)
self.db._logger.info.assert_called_once_with('Creating collection {collection} in {database}',
collection='test_collection', database='users~userNameDatabase')
@test_chainable
def test_insert_one(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
o = {'test': 2, '_id': '0123456789ab0123456789ab'}
oid = yield self.d.insert_one(o)
self.assertEqual(oid, '0123456789ab0123456789ab')
self.db._prepare_for_mongo.assert_called_with(o)
found = yield self.d.find_one({'_id': '0123456789ab0123456789ab'})
self.assertEqual(found, {'test': 2, '_id': '0123456789ab0123456789ab'})
@test_chainable
def test_insert_one_not_modified(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
o = {'test': 2, '_id': '0123456789ab0123456789ab'}
yield self.d.insert_one(o)
found = yield self.d.find_one({'_id': '0123456789ab0123456789ab'})
self.assertIsNot(o, found)
@test_chainable
def test_insert_one_no_id(self):
oid = yield self.d.insert_one({'test': 2})
found = yield self.d.find_one({'_id': oid})
self.assertEqual(found, {'test': 2, '_id': oid})
@test_chainable
def test_insert_one_create_flag(self):
self.db._get_collection = mock.MagicMock()
yield self.d.insert_one({'test': 2})
self.db._get_collection.assert_called_once_with('test_collection', True)
@test_chainable
def test_insert_one_date_time_fields(self):
ldatetime = self.faker.date_time(pytz.utc)
yield self.d.insert_one({'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': ldatetime},
fields=Fields(date_times=['datetime']))
found = yield self.d.find_one({'_id': '0123456789ab0123456789ab'})
self.assertEqual(found, {'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': ldatetime})
@test_chainable
def test_insert_one_date_fields(self):
date = self.faker.date_object()
yield self.d.insert_one({'test': 2, '_id': '0123456789ab0123456789ab', 'date': date}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '0123456789ab0123456789ab'}, fields=Fields(dates=['date']))
self.assertEqual(found, {'test': 2, '_id': '0123456789ab0123456789ab', 'date': date})
@test_chainable
def test_insert_one_date_fields2(self):
date = self.faker.date_object()
stored = datetime.datetime.combine(date, datetime.time(hour=0, tzinfo=pytz.utc))
yield self.d.insert_one({'test': 2, '_id': '0123456789ab0123456789ab', 'date': date}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '0123456789ab0123456789ab'})
self.assertEqual(found, {'test': 2, '_id': '0123456789ab0123456789ab', 'date': stored})
@test_chainable
def test_insert_many(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
]
ids = yield self.d.insert_many(obs)
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._prepare_for_mongo.assert_called_with(obs)
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 3, '_id': ids[1]})
@test_chainable
def test_insert_many_not_modified(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
]
ids = yield self.d.insert_many(obs)
found1 = yield self.d.find_one({'_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertIsNot(found1, obs[0])
self.assertIsNot(found2, obs[1])
@test_chainable
def test_insert_many_no_ids(self):
ids = yield self.d.insert_many([
{'test': 2},
{'test': 3}
])
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 3, '_id': ids[1]})
@test_chainable
def test_insert_many_create_flag(self):
self.db._get_collection = mock.MagicMock()
yield self.d.insert_many([
{'test': 2},
{'test': 3}
], fields=Fields(date_times=['date']))
self.db._get_collection.assert_called_once_with('test_collection', True)
@test_chainable
def test_insert_many_date_time_fields(self):
ldatetime = self.faker.date_time(pytz.utc)
yield self.d.insert_many([{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': ldatetime}],
fields=Fields(date_times=['datetime']))
found = yield self.d.find_many({'_id': '0123456789ab0123456789ab'}).to_list()
self.assertEqual(found[0], {'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': ldatetime})
@test_chainable
def test_insert_many_date_fields(self):
date = self.faker.date_object()
yield self.d.insert_many([{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date}], fields=Fields(dates=['date']))
found = yield self.d.find_many({'_id': '0123456789ab0123456789ab'}, fields=Fields(dates=['date'])).to_list()
self.assertEqual(found[0], {'test': 2, '_id': '0123456789ab0123456789ab', 'date': date})
@test_chainable
def test_insert_many_date_fields2(self):
date = self.faker.date_object()
stored = datetime.datetime.combine(date, datetime.time(hour=0, tzinfo=pytz.utc))
yield self.d.insert_many([{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date}], fields=Fields(dates=['date']))
found = yield self.d.find_many({'_id': '0123456789ab0123456789ab'}).to_list()
self.assertEqual(found[0], {'test': 2, '_id': '0123456789ab0123456789ab', 'date': stored})
@test_chainable
def test_replace_one(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
]
ids = yield self.d.insert_many(obs)
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.replace_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'test2': 6})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.db._prepare_for_mongo.assert_has_calls(
[call(obs), call({'_id': '59f1d9c57dd5d70043e74f8d'}), call({'test2': 6})])
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test2': 6, '_id': ids[1]})
self.assertEqual(result.matched, 1)
self.assertEqual(result.modified, 1)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_replace_one_upsert(self):
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
])
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.replace_one({'_id': '666f6f2d6261722d71757578'}, {'test': 6}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 3, '_id': ids[1]})
found3 = yield self.d.find_one({'_id': '666f6f2d6261722d71757578'})
self.assertEqual(found3, {'test': 6, '_id': '666f6f2d6261722d71757578'})
self.assertEqual(result.matched, 0)
self.assertEqual(result.modified, 0)
self.assertEqual(result.upserted_id, '666f6f2d6261722d71757578')
@test_chainable
def test_replace_one_no_collection(self):
result = yield self.d.replace_one({'_id': '666f6f2d6261722d71757578'}, {'test': 6})
self.assertEqual(result.matched, 0)
self.assertEqual(result.modified, 0)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_replace_one_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.replace_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'test': 6, 'datetime': datetime}, fields=Fields(date_times=['date']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime})
@test_chainable
def test_replace_one_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.replace_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'test': 6, 'date': date}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date']))
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date})
@test_chainable
def test_replace_one_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date, datetime.time(hour=0, tzinfo=pytz.utc))
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.replace_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'test': 6, 'date': date})
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'date': stored})
@test_chainable
def test_update_one(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
]
ids = yield self.d.insert_many(obs)
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.update_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.db._prepare_for_mongo.assert_has_calls(
[call(obs), call({'_id': '59f1d9c57dd5d70043e74f8d'}), call({'$set': {'test': 6}})])
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 6, '_id': ids[1]})
self.assertEqual(result.matched, 1)
self.assertEqual(result.modified, 1)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_update_one_functionality(self):
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 2, '_id': '59f1d9c57dd5d70043e74f8d'},
{'test': 6, '_id': '666f6f2d6261722d71757578'}
]
ids = yield self.d.insert_many(obs)
result = yield self.d.update_one({'test': 2}, {'$set': {'test2': 6}})
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, 'test2': 6, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 2, '_id': ids[1]})
found3 = yield self.d.find_one({'_id': ids[2]})
self.assertEqual(found3, {'test': 6, '_id': ids[2]})
self.assertEqual(result.matched, 1)
self.assertEqual(result.modified, 1)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_update_one_upsert(self):
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
])
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.update_one({'_id': '666f6f2d6261722d71757578'}, {'$set': {'test': 6}}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 3, '_id': ids[1]})
found3 = yield self.d.find_one({'_id': '666f6f2d6261722d71757578'})
self.assertEqual(found3, {'test': 6, '_id': '666f6f2d6261722d71757578'})
self.assertEqual(result.matched, 0)
self.assertEqual(result.modified, 0)
self.assertEqual(result.upserted_id, '666f6f2d6261722d71757578')
@test_chainable
def test_update_one_no_collection(self):
result = yield self.d.update_one({'_id': '666f6f2d6261722d71757578'}, {'$set': {'test': 6}})
self.assertEqual(result.matched, 0)
self.assertEqual(result.modified, 0)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_update_one_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
        ], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
        yield self.d.update_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}}, fields=Fields(date_times=['datetime']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2})
@test_chainable
def test_update_one_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.update_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date']))
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2})
@test_chainable
def test_update_one_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date2, datetime.time(hour=0, tzinfo=pytz.utc))
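        # A plain date is persisted as the corresponding midnight UTC datetime, which is
        # what a read without Fields(dates=...) is expected to return.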
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.update_one({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'date': stored})
@test_chainable
def test_update_many(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
]
ids = yield self.d.insert_many(obs)
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.update_many({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.db._prepare_for_mongo.assert_has_calls(
[call(obs), call({'_id': '59f1d9c57dd5d70043e74f8d'}), call({'$set': {'test': 6}})])
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 6, '_id': ids[1]})
self.assertEqual(result.matched, 1)
self.assertEqual(result.modified, 1)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_update_many_functionality(self):
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = [
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 2, '_id': '59f1d9c57dd5d70043e74f8d'},
{'test': 6, '_id': '666f6f2d6261722d71757578'}
]
ids = yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.update_many({'test': 2}, {'$set': {'test2': 6}})
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, 'test2': 6, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 2, 'test2': 6, '_id': ids[1]})
found3 = yield self.d.find_one({'_id': ids[2]})
self.assertEqual(found3, {'test': 6, '_id': ids[2]})
self.assertEqual(result.matched, 2)
self.assertEqual(result.modified, 2)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_update_many_upsert(self):
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab'},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d'},
])
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
result = yield self.d.update_many({'_id': '666f6f2d6261722d71757578'}, {'$set': {'test': 6}}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
found1 = yield self.d.find_one({'_id': ids[0]})
self.assertEqual(found1, {'test': 2, '_id': ids[0]})
found2 = yield self.d.find_one({'_id': ids[1]})
self.assertEqual(found2, {'test': 3, '_id': ids[1]})
found3 = yield self.d.find_one({'_id': '666f6f2d6261722d71757578'})
self.assertEqual(found3, {'test': 6, '_id': '666f6f2d6261722d71757578'})
self.assertEqual(result.matched, 0)
self.assertEqual(result.modified, 0)
self.assertEqual(result.upserted_id, '666f6f2d6261722d71757578')
@test_chainable
def test_update_many_no_collection(self):
result = yield self.d.update_many({'_id': '666f6f2d6261722d71757578'}, {'$set': {'test': 6}})
self.assertEqual(result.matched, 0)
self.assertEqual(result.modified, 0)
self.assertEqual(result.upserted_id, None)
@test_chainable
def test_update_many_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
        ], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
        yield self.d.update_many({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}}, fields=Fields(date_times=['datetime']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2})
@test_chainable
def test_update_many_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.update_many({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date']))
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2})
@test_chainable
def test_update_many_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date2, datetime.time(hour=0, tzinfo=pytz.utc))
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.update_many({'_id': '59f1d9c57dd5d70043e74f8d'}, {'$set': {'test': 6}}, fields=Fields(dates=['date']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 6, '_id': '59f1d9c57dd5d70043e74f8d', 'date': stored})
@test_chainable
def test_find_one(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one({'test': i})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
@test_chainable
def test_find_one_projection(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
for i in range(total):
found = yield self.d.find_one({'test': i}, {'_id': 0})
self.assertEqual(found, {'test': i})
@test_chainable
def test_find_one_skip(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
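        # Every 'test' value occurs twice (once with test2 == 0, once with test2 == 1);
        # skip=1 combined with an ascending _id sort should yield the second batch's document.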
for i in range(total):
found = yield self.d.find_one({'test': i}, skip=1, sort=('_id', SortMode.Asc))
self.assertEqual(obs[i + total]['_id'], ids[i + total])
self.assertEqual(found, obs[i + total])
@test_chainable
def test_find_one_sort(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
found = yield self.d.find_one({'test': i}, sort=('_id', SortMode.Desc))
self.assertEqual(obs[i + total]['_id'], ids[i + total])
self.assertEqual(found, obs[i + total])
for i in range(total):
found = yield self.d.find_one({'test': i}, sort=('_id', SortMode.Asc))
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
@test_chainable
def test_find_one_no_collection(self):
result = yield self.d.find_one({'_id': '666f6f2d6261722d71757578'})
self.assertEqual(result, None)
@test_chainable
def test_find_one_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2})
@test_chainable
def test_find_one_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date']))
self.assertEqual(found, {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2})
@test_chainable
def test_find_one_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date2, datetime.time(hour=0, tzinfo=pytz.utc))
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': stored})
@test_chainable
def test_find_many(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_many({})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertIsInstance(found, Cursor)
self.assertSequenceEqual((yield found.to_list()), obs)
@test_chainable
def test_find_many_parse_result(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
fields = Fields()
fields.parse_result = mock.MagicMock(wraps=fields.parse_result)
yield self.db.find_many('test_collection', {}, fields=fields, claims={'user': 'test'})
fields.parse_result.assert_has_calls([])
@test_chainable
def test_find_many_projection(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
found = yield self.d.find_many({}, {'_id': 0})
self.assertIsInstance(found, Cursor)
self.assertSequenceEqual((yield found.to_list()), query(obs).select(lambda x: {'test': x['test']}).to_list())
@test_chainable
def test_find_many_skip(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
found = yield self.d.find_many({}, skip=10)
self.assertIsInstance(found, Cursor)
self.assertSequenceEqual((yield found.to_list()), obs[10:])
@test_chainable
def test_find_many_limit(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
found = yield self.d.find_many({}, limit=10)
self.assertIsInstance(found, Cursor)
self.assertSequenceEqual((yield found.to_list()), obs[:10])
@test_chainable
def test_find_many_sort(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
found = yield self.d.find_many({}, sort=('test', SortMode.Desc))
self.assertIsInstance(found, Cursor)
self.assertListEqual((yield found.to_list()), list(reversed(obs)))
@test_chainable
def test_find_many_no_collection(self):
result = yield self.d.find_many({'_id': '666f6f2d6261722d71757578'})
self.assertIsInstance(result, Cursor)
self.assertSequenceEqual((yield result.to_list()), [])
@test_chainable
def test_find_many_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_many({'_id': '59f1d9c57dd5d70043e74f8d'}).to_list()
self.assertEqual(found[0], {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2})
@test_chainable
def test_find_many_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_many({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date'])).to_list()
self.assertEqual(found[0], {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2})
@test_chainable
def test_find_many_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date2, datetime.time(hour=0, tzinfo=pytz.utc))
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_many({'_id': '59f1d9c57dd5d70043e74f8d'}).to_list()
self.assertEqual(found[0], {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': stored})
@test_chainable
def test_more_dry(self):
d = self.d.wrapper.more("wefwefwef")
yield self.assertFailure(d, DatabaseException)
@test_chainable
def test_rewind(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_many({})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertIsInstance(found, Cursor)
for i in range(total):
self.assertEqual((yield next(found)), obs[i])
yield found.rewind()
for i in range(total):
self.assertEqual((yield next(found)), obs[i])
@test_chainable
def test_rewind_dry(self):
yield self.assertFailure(self.d.wrapper.rewind("wefwefwef"), DatabaseException)
@test_chainable
def test_count_cursor(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_many({})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertIsInstance(found, Cursor)
self.assertEqual((yield found.count()), 200)
@test_chainable
def test_count_no_filter(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
count = yield self.d.count()
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(count, 200)
@test_chainable
def test_count_filter(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
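        # test values 51..199 satisfy the filter, so 149 of the 200 documents should match.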
count = yield self.d.count({'test': {'$gt': 50}})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(count, 149)
@test_chainable
def test_count_filter_skip(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
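        # 50 documents match test < 50; skipping 10 of them should leave a count of 40.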
count = yield self.d.count({'test': {'$lt': 50}}, skip=10)
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(count, 40)
@test_chainable
def test_count_filter_limit(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
count = yield self.d.count({'test': {'$gt': 50}}, limit=10)
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(count, 10)
@test_chainable
def test_count_filter_no_collection(self):
result = yield self.d.count({'_id': '666f6f2d6261722d71757578'})
self.assertEqual(result, 0)
@test_chainable
def test_count_neither(self):
result = yield self.d.count()
self.assertEqual(result, 0)
@test_chainable
def test_count_filter_date_time_fields(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId()), 'datetime': self.faker.date_time(pytz.utc)})
yield self.d.insert_many(obs, fields=Fields(date_times=['datetime']))
yield self.d.count({'test': {'$gt': 50}}, fields=Fields(date_times=['datetime']))
@test_chainable
def test_count_filter_date_fields(self):
total = 200
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId()), 'date': self.faker.date_time(pytz.utc)})
yield self.d.insert_many(obs, fields=Fields(dates=['date']))
yield self.d.count({'test': {'$gt': 50}}, fields=Fields(dates=['date']))
@test_chainable
def test_find_one_and_update(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
found2 = yield self.d.find_one({'test': i})
self.assertEqual(found2, {'_id': obs[i]['_id'], 'test': i, 'test2': total - i})
@test_chainable
def test_find_one_and_update_upsert(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
found2 = yield self.d.find_one({'test': i})
self.assertEqual(found2, {'_id': obs[i]['_id'], 'test': i, 'test2': total - i})
@test_chainable
def test_find_one_and_update_upsert2(self):
total = 100
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
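            # Nothing matched, so the returned pre-update document is None; the upserted
            # document is verified through the find_one below.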
found2 = yield self.d.find_one({'test': i})
self.assertEqual(found, None)
self.assertEqual(found2['test'], i)
self.assertEqual(found2['test2'], total - i)
@test_chainable
def test_find_one_and_update_projection(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}}, projection={'_id': 0})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, {'test': i})
found2 = yield self.d.find_one({'test': i})
self.assertEqual(found2, {'_id': obs[i]['_id'], 'test': i, 'test2': total - i})
@test_chainable
def test_find_one_and_update_sort(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}},
sort=('_id', SortMode.Desc))
self.assertEqual(obs[i + total]['_id'], ids[i + total])
self.assertEqual(found, obs[i + total])
for i in range(total):
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}},
sort=('_id', SortMode.Asc))
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
@test_chainable
def test_find_one_and_update_return_updated(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_update({'test': i}, {'$set': {'test2': total - i}}, return_updated=True)
self.db._get_collection.assert_called_once_with('test_collection', False)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, {'_id': obs[i]['_id'], 'test': i, 'test2': total - i})
@test_chainable
def test_find_one_and_update_collection(self):
obs = [{'test': 1, '_id': str(ObjectId())}]
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
yield self.d.find_one_and_update({'test': 1}, {'$set': {'test2': 0}})
self.db._get_collection.assert_called_once_with('test_collection', False)
@test_chainable
def test_find_one_and_update_no_collection(self):
result = yield self.d.find_one_and_update({'_id': '666f6f2d6261722d71757578'}, {'$set': {'test': 6}})
self.assertEqual(result, None)
@test_chainable
def test_find_one_and_update_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.find_one_and_update({'_id': '59f1d9c57dd5d70043e74f8d'}, {
'$set': {'datetime2': datetime}
}, fields=Fields(date_times=['datetime']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(date_times=['datetime', 'datetime2']))
self.assertEqual(found, {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2, 'datetime2': datetime})
@test_chainable
def test_find_one_and_update_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.find_one_and_update({'_id': '59f1d9c57dd5d70043e74f8d'}, {
'$set': {'date2': date}
}, fields=Fields(dates=['date2']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date', 'date2']))
self.assertEqual(found, {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2, 'date2': date})
@test_chainable
def test_find_one_and_update_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date, datetime.time(hour=0, tzinfo=pytz.utc))
stored2 = datetime.datetime.combine(date2, datetime.time(hour=0, tzinfo=pytz.utc))
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
yield self.d.find_one_and_update({'_id': '59f1d9c57dd5d70043e74f8d'}, {
'$set': {'date2': date}
}, fields=Fields(dates=['date2']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': stored2, 'date2': stored})
@test_chainable
def test_find_one_and_replace(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
found2 = yield self.d.find_one({'test': i})
found3 = yield self.d.find_one({'test2': total - i})
self.assertEqual(found2, None)
self.assertEqual(found3, {'_id': obs[i]['_id'], 'test2': total - i})
@test_chainable
def test_find_one_and_replace_upsert(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
found2 = yield self.d.find_one({'test': i})
found3 = yield self.d.find_one({'test2': total - i})
self.assertEqual(found2, None)
self.assertEqual(found3, {'_id': obs[i]['_id'], 'test2': total - i})
@test_chainable
def test_find_one_and_replace_upsert2(self):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_replace({'test': 0}, {'test2': 100}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
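        # Nothing matched, so the pre-replace result is None; the upsert inserts a new
        # document that only contains test2 (with a generated _id).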
found2 = yield self.d.find_one({'test': 0})
found3 = yield self.d.find_one({'test2': 100})
self.assertEqual(found, None)
self.assertEqual(found2, None)
self.assertEqual(found3['test2'], 100)
@test_chainable
def test_find_one_and_replace_upsert3(self):
total = 100
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i}, upsert=True)
self.db._get_collection.assert_called_once_with('test_collection', True)
found2 = yield self.d.find_one({'test': i})
found3 = yield self.d.find_one({'test2': total - i})
self.assertEqual(found, None)
self.assertEqual(found2, None)
self.assertEqual(found3['test2'], total - i)
@test_chainable
def test_find_one_and_replace_projection(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i}, projection={'_id': 0})
self.db._get_collection.assert_called_once_with('test_collection', False)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, {'test': i})
found2 = yield self.d.find_one({'test': i})
found3 = yield self.d.find_one({'test2': total - i})
self.assertEqual(found2, None)
self.assertEqual(found3, {'_id': obs[i]['_id'], 'test2': total - i})
@test_chainable
def test_find_one_and_replace_sort(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i}, sort=('_id', SortMode.Desc))
self.assertEqual(obs[i + total]['_id'], ids[i + total])
self.assertEqual(found, obs[i + total])
for i in range(total):
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i}, sort=('_id', SortMode.Asc))
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
@test_chainable
def test_find_one_and_replace_return_updated(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_replace({'test': i}, {'test2': total - i}, return_updated=True)
self.db._get_collection.assert_called_once_with('test_collection', False)
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, {'_id': obs[i]['_id'], 'test2': total - i})
@test_chainable
def test_find_one_and_replace_collection(self):
obs = [{'test': 1, '_id': str(ObjectId())}]
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
yield self.d.find_one_and_replace({'test': 1}, {'test2': 0})
self.db._get_collection.assert_called_once_with('test_collection', False)
@test_chainable
def test_find_one_and_replace_no_collection(self):
result = yield self.d.find_one_and_replace({'_id': '666f6f2d6261722d71757578'}, {'test': 6})
self.assertEqual(result, None)
@test_chainable
def test_find_one_and_replace_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.find_one_and_replace({'_id': '59f1d9c57dd5d70043e74f8d'}, {'datetime2': datetime},
fields=Fields(date_times=['datetime']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(date_times=['datetime', 'datetime2']))
self.assertEqual(found, {'_id': '59f1d9c57dd5d70043e74f8d', 'datetime2': datetime})
@test_chainable
def test_find_one_and_replace_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.find_one_and_replace({'_id': '59f1d9c57dd5d70043e74f8d'}, {'date2': date}, fields=Fields(dates=['date2']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date', 'date2']))
self.assertEqual(found, {'_id': '59f1d9c57dd5d70043e74f8d', 'date2': date})
@test_chainable
def test_find_one_and_replace_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
stored = datetime.datetime.combine(date, datetime.time(hour=0, tzinfo=pytz.utc))
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
yield self.d.find_one_and_replace({'_id': '59f1d9c57dd5d70043e74f8d'}, {'date2': date}, fields=Fields(dates=['date2']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, {'_id': '59f1d9c57dd5d70043e74f8d', 'date2': stored})
@test_chainable
def test_find_one_and_delete(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_delete({'test': i})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
found2 = yield self.d.find_one({'test': i})
self.assertEqual(found2, None)
@test_chainable
def test_find_one_and_delete_projection(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.find_one_and_delete({'test': i}, projection={'_id': 0})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, {'test': i})
found2 = yield self.d.find_one({'test': i})
self.assertEqual(found2, None)
@test_chainable
def test_find_one_and_delete_sort(self):
total = 100
self.db._prepare_for_mongo = mock.MagicMock(wraps=self.db._prepare_for_mongo)
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
ids = yield self.d.insert_many(obs)
for i in range(total):
found = yield self.d.find_one_and_delete({'test': i}, sort=('_id', SortMode.Desc))
self.assertEqual(obs[i + total]['_id'], ids[i + total])
self.assertEqual(found, obs[i + total])
for i in range(total):
found = yield self.d.find_one_and_delete({'test': i}, sort=('_id', SortMode.Asc))
self.assertEqual(obs[i]['_id'], ids[i])
self.assertEqual(found, obs[i])
@test_chainable
def test_find_one_and_delete_collection(self):
obs = [{'test': 1, '_id': str(ObjectId())}]
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
yield self.d.find_one_and_delete({'test': 1}, {'test2': 0})
self.db._get_collection.assert_called_once_with('test_collection')
@test_chainable
def test_find_one_and_delete_no_collection(self):
result = yield self.d.find_one_and_delete({'_id': '666f6f2d6261722d71757578'}, {'test': 6})
self.assertEqual(result, None)
@test_chainable
def test_find_one_and_delete_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.find_one_and_delete({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(date_times=['datetime']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(date_times=['datetime', 'datetime2']))
self.assertEqual(found, None)
@test_chainable
def test_find_one_and_delete_date_fields(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.find_one_and_delete({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date2']))
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date', 'date2']))
self.assertEqual(found, None)
@test_chainable
def test_find_one_and_delete_date_fields2(self):
date = self.faker.date_object()
date2 = self.faker.date_object()
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'date': date},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'date': date2},
], fields=Fields(dates=['date']))
yield self.d.find_one_and_delete({'_id': '59f1d9c57dd5d70043e74f8d'}, fields=Fields(dates=['date2']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
found = yield self.d.find_one({'_id': '59f1d9c57dd5d70043e74f8d'})
self.assertEqual(found, None)
@test_chainable
def test_distinct(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.distinct('test')
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(found, list(range(total)))
@test_chainable
def test_distinct2(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.distinct('test')
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(found, list(range(total)))
@test_chainable
def test_distinct_filter(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.distinct('test', {'test': {'$gt': 50}})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(found, list(range(51, total)))
@test_chainable
def test_distinct_collection(self):
obs = [{'test': 1, '_id': str(ObjectId())}]
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
yield self.d.distinct('test')
self.db._get_collection.assert_called_once_with('test_collection')
@test_chainable
def test_distinct_no_collection(self):
result = yield self.d.distinct('test')
self.assertEqual(result, [])
@test_chainable
def test_distinct_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.distinct('test', fields=Fields(date_times=['datetime']))
@test_chainable
def test_aggregate(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total * 2):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.aggregate([
{
'$match': {'test': {'$gt': 50}}
},
{
'$group': {
'_id': '$test2',
'count': {'$sum': 1}
}
}
])
self.db._get_collection.assert_called_once_with('test_collection')
self.assertIsInstance(found, Cursor)
found_list = yield found.to_list()
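        # test > 50 leaves 49 documents (51..99) in the test2 == 0 group and
        # 149 documents (51..199) in the test2 == 1 group.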
self.assertEqual(found_list[0], {'_id': '0', 'count': 49})
self.assertEqual(found_list[1], {'_id': '1', 'count': 149})
@test_chainable
def test_aggregate2(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total * 2):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
found = yield self.d.aggregate([
{
'$match': {'test': {'$gt': 50}}
},
{
'$group': {
'_id': '$test',
'count': {'$sum': 1}
}
},
{
'$sort': {
'_id': int(SortMode.Asc)
}
}
])
self.db._get_collection.assert_called_once_with('test_collection')
self.assertIsInstance(found, Cursor)
found_list = yield found.to_list()
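        # Values 51..99 occur in both batches (count 2); values 100..199 occur only in
        # the larger batch (count 1), matching the expression below.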
for i in range(total * 2 - 51):
self.assertEqual(found_list[i], {'_id': str(51 + i), 'count': 1 if i >= 49 else 2})
@test_chainable
def test_aggregate_no_collection(self):
result = yield self.d.aggregate([])
self.assertSequenceEqual((yield result.to_list()), [])
@test_chainable
def test_delete_one(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
count = yield self.d.delete_one({'test': 1})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(count, 1)
found = yield self.d.find_one({'test': 1})
self.assertEqual(found['test'], 1)
self.assertEqual(found['test2'], 1)
@test_chainable
def test_delete_one_no_collection(self):
result = yield self.d.delete_one({})
self.assertEqual(result, 0)
@test_chainable
def test_delete_one_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.delete_one({'datetime': datetime}, fields=Fields(date_times=['datetime']))
@test_chainable
def test_delete_many(self):
total = 100
obs = []
for i in range(total):
obs.append({'test': i, 'test2': 0, '_id': str(ObjectId())})
for i in range(total):
obs.append({'test': i, 'test2': 1, '_id': str(ObjectId())})
yield self.d.insert_many(obs)
self.db._get_collection = mock.MagicMock(wraps=self.db._get_collection)
count = yield self.d.delete_many({'test': 1})
self.db._get_collection.assert_called_once_with('test_collection')
self.assertEqual(count, 2)
found = yield self.d.find_one({'test': 1})
self.assertEqual(found, None)
@test_chainable
def test_delete_many_no_collection(self):
result = yield self.d.delete_many({})
self.assertEqual(result, 0)
@test_chainable
def test_delete_many_date_time_fields(self):
datetime = self.faker.date_time(pytz.utc)
datetime2 = self.faker.date_time(pytz.utc)
ids = yield self.d.insert_many([
{'test': 2, '_id': '0123456789ab0123456789ab', 'datetime': datetime},
{'test': 3, '_id': '59f1d9c57dd5d70043e74f8d', 'datetime': datetime2},
], fields=Fields(date_times=['datetime']))
self.assertEqual(ids, ['0123456789ab0123456789ab', '59f1d9c57dd5d70043e74f8d'])
yield self.d.delete_many({'datetime': datetime}, fields=Fields(date_times=['datetime']))
| apache-2.0 | 94,760,294,051,316,580 | 39.285482 | 140 | 0.587855 | false |
MarkusHackspacher/unknown-horizons | horizons/world/buildability/potentialroadconnectivitycache.py | 1 | 3725 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.ai.aiplayer.constants import BUILDING_PURPOSE
from horizons.world.buildability.connectedareacache import ConnectedAreaCache
class PotentialRoadConnectivityCache:
"""
    Query whether a road connection between two sets of coordinates is possible.
This class is used by the AI to figure out whether it might be possible to build a
    road between two sets of coordinates. The connection is only "potential" because
    the area includes land that is merely part of the AI's plan, and that land may be
    owned by the AI or not yet owned by anyone.
"""
def __init__(self, area_builder):
self._area_builder = area_builder
self._land_manager = area_builder.land_manager
self._settlement_ground_map = area_builder.settlement.ground_map
self._cache = ConnectedAreaCache()
self.area_numbers = self._cache.area_numbers # {(x, y): area id, ...}
def modify_area(self, coords_list):
"""
Refresh the usability of the coordinates in the given list.
This function is called with a list of coordinates on which the possibility of
building a road may have changed. It figures out whether it is possible to build
a road on (x, y) and updates the underlying ConnectedAreaCache accordingly.
"""
add_list = []
remove_list = []
for coords in coords_list:
if coords not in self._settlement_ground_map:
if coords in self.area_numbers:
remove_list.append(coords)
elif coords in self._land_manager.coastline:
if coords in self.area_numbers:
remove_list.append(coords)
elif coords in self._land_manager.roads:
if coords not in self.area_numbers:
add_list.append(coords)
elif coords in self._area_builder.plan:
if self._area_builder.plan[coords][0] == BUILDING_PURPOSE.NONE:
if coords not in self.area_numbers:
add_list.append(coords)
else:
assert self._area_builder.plan[coords][0] != BUILDING_PURPOSE.ROAD
if coords in self.area_numbers:
remove_list.append(coords)
else:
if coords in self.area_numbers:
remove_list.append(coords)
if add_list:
self._cache.add_area(add_list)
if remove_list:
self._cache.remove_area(remove_list)
def is_connection_possible(self, coords_set1, coords_set2):
"""Return True if and only if it is possible to connect the two coordinate sets.
        More specifically, it returns True if and only if it is possible to build a road
from some (x1, y1) in coords_set1 to some (x2, y2) in coords_set2 entirely within
the area. This is done cheaply using the underlying ConnectedAreaCache.
"""
areas1 = set()
for coords in coords_set1:
if coords in self.area_numbers:
areas1.add(self.area_numbers[coords])
for coords in coords_set2:
if coords in self.area_numbers:
if self.area_numbers[coords] in areas1:
return True
return False
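# Illustrative usage sketch (not part of the original module). The `area_builder`
# object and the coordinates below are assumptions for the example:
#
#     cache = PotentialRoadConnectivityCache(area_builder)
#     cache.modify_area([(3, 4), (3, 5)])          # refresh tiles whose state changed
#     if cache.is_connection_possible({(3, 4)}, {(10, 12)}):
#         pass  # a road between the two coordinate sets may be possible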
| gpl-2.0 | -8,918,137,794,307,005,000 | 37.010204 | 86 | 0.713826 | false |
beeftornado/sentry | src/sentry/integrations/msteams/utils.py | 1 | 3654 | from __future__ import absolute_import
import six
import logging
import enum
from django.http import Http404
from sentry.models import (
Integration,
Organization,
IdentityProvider,
)
from sentry.shared_integrations.exceptions import ApiError
from sentry.utils.compat import filter
from .client import MsTeamsClient, MsTeamsPreInstallClient, get_token_data
MSTEAMS_MAX_ITERS = 100
logger = logging.getLogger("sentry.integrations.msteams")
# MS Teams will convert integers into strings in value inputs sent in adaptive
# cards, so we may as well just do that here first.
class ACTION_TYPE(six.text_type, enum.Enum):
RESOLVE = "1"
IGNORE = "2"
ASSIGN = "3"
UNRESOLVE = "4"
UNASSIGN = "5"
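# Note (illustrative, not original code): because the enum also subclasses
# six.text_type, members compare equal to their string values, e.g.
# ACTION_TYPE.RESOLVE == "1" is True and ACTION_TYPE("2") is ACTION_TYPE.IGNORE.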
def channel_filter(channel, name):
# the general channel has no name in the list
# retrieved from the REST API call
if channel.get("name"):
return name.lower() == channel.get("name").lower()
else:
return name.lower() == "general"
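# Resolve a channel or member name to a conversation id for the given team:
# channels are checked first, then members are searched page by page (at most
# MSTEAMS_MAX_ITERS pages); None is returned when nothing matches or the
# integration does not exist.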
def get_channel_id(organization, integration_id, name):
try:
integration = Integration.objects.get(
provider="msteams", organizations=organization, id=integration_id
)
except Integration.DoesNotExist:
return None
team_id = integration.external_id
client = MsTeamsClient(integration)
# handle searching for channels first
channel_list = client.get_channel_list(team_id)
filtered_channels = list(filter(lambda x: channel_filter(x, name), channel_list))
if len(filtered_channels) > 0:
return filtered_channels[0].get("id")
# handle searching for users
members = client.get_member_list(team_id, None)
for i in range(MSTEAMS_MAX_ITERS):
member_list = members.get("members")
continuation_token = members.get("continuationToken")
filtered_members = list(
filter(lambda x: x.get("name").lower() == name.lower(), member_list)
)
if len(filtered_members) > 0:
# TODO: handle duplicate username case
user_id = filtered_members[0].get("id")
tenant_id = filtered_members[0].get("tenantId")
return client.get_user_conversation_id(user_id, tenant_id)
if not continuation_token:
return None
members = client.get_member_list(team_id, continuation_token)
return None
def send_incident_alert_notification(action, incident, metric_value):
from .card_builder import build_incident_attachment
channel = action.target_identifier
integration = action.integration
attachment = build_incident_attachment(incident, metric_value)
client = MsTeamsClient(integration)
try:
client.send_card(channel, attachment)
except ApiError as e:
logger.info("rule.fail.msteams_post", extra={"error": six.text_type(e)})
def get_identity(user, organization_id, integration_id):
try:
organization = Organization.objects.get(id__in=user.get_orgs(), id=organization_id)
except Organization.DoesNotExist:
raise Http404
try:
integration = Integration.objects.get(id=integration_id, organizations=organization)
except Integration.DoesNotExist:
raise Http404
try:
idp = IdentityProvider.objects.get(external_id=integration.external_id, type="msteams")
except IdentityProvider.DoesNotExist:
raise Http404
return organization, integration, idp
def get_preinstall_client(service_url):
# may want try/catch here since this makes an external API call
access_token = get_token_data()["access_token"]
return MsTeamsPreInstallClient(access_token, service_url)
| bsd-3-clause | 6,982,754,459,844,222,000 | 29.966102 | 95 | 0.688013 | false |
shnergle/ShnergleServer | api/util.py | 1 | 5137 | import calendar
import datetime
import functools
import json
import os
import time
import cherrypy
import pyodbc
def connect(thread_index):
cherrypy.thread_data.db = pyodbc.connect(os.environ['DATABASE'])
current_dir = os.path.dirname(os.path.abspath(__file__))
cherrypy.thread_data.placeholder_image = open(os.path.join(current_dir, 'placeholder.png'), 'rb').read()
def dont_cache():
cherrypy.response.headers['Expires'] = datetime.datetime.utcnow().strftime(
'%a, %d %b %Y %H:%M:%S GMT')
cherrypy.response.headers['Cache-Control'] = ('no-store, '
'no-cache, '
'must-revalidate, '
'post-check=0, '
'pre-check=0')
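# Decorator: reject the request with HTTP 403 unless it carries the shared APP_SECRET.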
def protect(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if kwargs.pop('app_secret', False) != os.environ['APP_SECRET']:
raise cherrypy.HTTPError(403)
return func(*args, **kwargs)
return decorator
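# Decorator: translate the supplied facebook_id into an internal user id (passed on
# as user_id) and reject unknown or missing users with HTTP 403.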
def auth(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if not kwargs.get('facebook_id', False):
raise cherrypy.HTTPError(403)
cursor = kwargs['cursor']
qry = {'select': 'id',
'table': 'users',
'where': 'facebook_id = ?',
'order_by': 'id',
'limit': 1}
cursor.execute(query(**qry), (kwargs['facebook_id'],))
res = cursor.fetchone().id
if not res:
raise cherrypy.HTTPError(403)
kwargs.update(user_id=res)
return func(*args, **kwargs)
return decorator
def jsonp(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
dont_cache()
res = json.dumps(func(*args, **kwargs), separators=(',', ':'),
default=lambda o: str(o))
callback = kwargs.pop('callback', False)
if callback:
cherrypy.response.headers['Content-Type'] = ('text/javascript; '
'charset=utf-8')
res = callback + '(' + res + ');'
else:
cherrypy.response.headers['Content-Type'] = 'application/json'
return res
return decorator
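# Decorator: hand the wrapped function a database cursor and always commit and
# close it afterwards.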
def db(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
cursor = cherrypy.thread_data.db.cursor()
kwargs.update(cursor=cursor)
try:
res = func(*args, **kwargs)
finally:
cursor.commit()
cursor.close()
return res
return decorator
def implode(glue, list):
return list if isinstance(list, str) else glue.join(list)
def query(select=None, table=None, left_join=None, on=None, where=None,
delete=None,
group_by=None, order_by=None, limit=None,
insert_into=None, columns=None,
update=None, set_values=None,
last_id=False):
if select:
qry = 'SELECT ' + implode(', ', select)
if table:
qry += ' FROM ' + implode(', ', table)
if left_join and on:
if isinstance(left_join, str):
left_join = [left_join]
if isinstance(on, str):
on = [on]
for j, o in zip(left_join, on):
qry += ' LEFT JOIN ' + j + ' ON ' + o
if where:
qry += ' WHERE ' + implode(' AND ', where)
if group_by:
qry += ' GROUP BY ' + implode(', ', group_by)
if order_by:
qry += ' ORDER BY ' + implode(', ', order_by)
if limit:
if isinstance(limit, str) or isinstance(limit, int):
qry += ' OFFSET 0 ROWS FETCH NEXT ' + str(limit) + ' ROWS ONLY'
else:
qry += (' OFFSET ' + str(int(limit[0])) + ' ROWS FETCH NEXT ' +
str(int(limit[1])) + ' ROWS ONLY')
elif delete:
qry = 'DELETE FROM ' + delete + ' WHERE ' + implode(' AND ', where)
elif insert_into:
qry = 'INSERT INTO ' + insert_into
if columns:
qry += (' (' + implode(', ', columns) + ')' + ' VALUES (' +
('?' + ', ?' * (len(columns) - 1)) + ')')
elif update:
qry = 'UPDATE ' + update
if set_values:
qry += ' SET ' + implode(' = ?, ', set_values) + ' = ?'
if where:
qry += ' WHERE ' + implode(' AND ', where)
elif last_id:
qry = 'SELECT @@Identity AS [identity]'
return qry
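# Illustrative sketch of how the builder composes SQL (not part of the original
# module; the table and column names are assumptions):
#
#     query(select='id', table='users', where='facebook_id = ?',
#           order_by='id', limit=1)
#     # -> "SELECT id FROM users WHERE facebook_id = ? ORDER BY id
#     #     OFFSET 0 ROWS FETCH NEXT 1 ROWS ONLY"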
expose = cherrypy.expose
def to_int(value):
return int(value) if value else None
def to_bool(value):
if value is None:
return None
if not value:
return False
if value in ('none', 'false', 'no', 'off', '0'):
return False
return True
def to_float(value):
return float(value) if value else None
def row_to_dict(cursor, row):
return {t[0]: val for t, val in zip(cursor.description, row)}
def now():
return calendar.timegm(datetime.datetime.utcnow().utctimetuple())
| mit | 8,012,379,061,196,236,000 | 30.133333 | 108 | 0.512167 | false |
mathause/regionmask | regionmask/defined_regions/giorgi.py | 1 | 2880 | # F. Giorgi and R. Francisco
# Uncertainties in regional climate change prediction: a regional analysis
# of ensemble simulations with the HADCM2 coupled AOGCM
outlines = dict()
outlines[1] = ((110, -45), (155, -45), (155, -11), (110, -11))
outlines[2] = ((-82, -20), (-34, -20), (-34, 12), (-82, 12))
outlines[3] = ((-76, -56), (-40, -56), (-40, -20), (-76, -20))
outlines[4] = ((-116, 10), (-83, 10), (-83, 25), (-85, 25), (-85, 30), (-116, 30))
outlines[5] = ((-130, 30), (-103, 30), (-103, 60), (-130, 60))
outlines[6] = ((-103, 30), (-85, 30), (-85, 50), (-103, 50))
outlines[7] = ((-85, 25), (-60, 25), (-60, 50), (-85, 50))
outlines[8] = ((-170, 60), (-103, 60), (-103, 72), (-170, 72))
outlines[9] = ((-103, 50), (-10, 50), (-10, 85), (-103, 85))
outlines[10] = ((-10, 30), (40, 30), (40, 48), (-10, 48))
outlines[11] = ((-10, 48), (40, 48), (40, 75), (-10, 75))
outlines[12] = ((-20, -12), (22, -12), (22, 18), (-20, 18))
outlines[13] = ((22, -12), (52, -12), (52, 18), (22, 18))
outlines[14] = ((-10, -35), (52, -35), (52, -12), (-10, -12))
outlines[15] = ((-20, 18), (65, 18), (65, 30), (-20, 30))
outlines[16] = ((95, -11), (155, -11), (155, 20), (100, 20), (100, 5), (95, 5))
outlines[17] = ((100, 20), (145, 20), (145, 50), (100, 50))
outlines[18] = ((65, 5), (100, 5), (100, 30), (65, 30))
outlines[19] = ((40, 30), (75, 30), (75, 50), (40, 50))
outlines[20] = ((75, 30), (100, 30), (100, 50), (75, 50))
outlines[21] = ((40, 50), (180, 50), (180, 70), (40, 70))
abbrevs = dict()
abbrevs[1] = "AUS"
abbrevs[2] = "AMZ"
abbrevs[3] = "SSA"
abbrevs[4] = "CAM"
abbrevs[5] = "WNA"
abbrevs[6] = "CNA"
abbrevs[7] = "ENA"
abbrevs[8] = "ALA"
abbrevs[9] = "GRL"
abbrevs[10] = "MED"
abbrevs[11] = "NEU"
abbrevs[12] = "WAF"
abbrevs[13] = "EAF"
abbrevs[14] = "SAF"
abbrevs[15] = "SAH"
abbrevs[16] = "SEA"
abbrevs[17] = "EAS"
abbrevs[18] = "SAS"
abbrevs[19] = "CAS"
abbrevs[20] = "TIB"
abbrevs[21] = "NAS"
names = dict()
names[1] = "Australia"
names[2] = "Amazon Basin"
names[3] = "Southern South America"
names[4] = "Central America"
names[5] = "Western North America"
names[6] = "Central North America"
names[7] = "Eastern North America"
names[8] = "Alaska"
names[9] = "Greenland"
names[10] = "Mediterranean Basin"
names[11] = "Northern Europe"
names[12] = "Western Africa"
names[13] = "Eastern Africa"
names[14] = "Southern Africa"
names[15] = "Sahara"
names[16] = "Southeast Asia"
names[17] = "East Asia"
names[18] = "South Asia"
names[19] = "Central Asia"
names[20] = "Tibet"
names[21] = "North Asia"
# =============================================================================
from ..core.regions import Regions
numbers = range(1, 22)
source = (
"Giorgi and Franciso, 2000 " "(http://link.springer.com/article/10.1007/PL00013733)"
)
giorgi = Regions(
outlines,
numbers=numbers,
names=names,
abbrevs=abbrevs,
name="Giorgi",
source=source,
)
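# Minimal usage sketch (assumes regionmask's public Regions API, e.g. a mask()
# method taking 1D lon/lat arrays; the exact call signature and return type may
# differ between regionmask versions):
#   import numpy as np
#   lon = np.arange(-180, 180, 2.0)
#   lat = np.arange(-90, 90, 2.0)
#   mask = giorgi.mask(lon, lat)  # grid of region numbers, NaN outside all regions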
| mit | 7,027,897,503,199,743,000 | 30.648352 | 88 | 0.545833 | false |
WheatonCS/Lexos | lexos/receivers/rolling_window_receiver.py | 1 | 7808 | """This is the receiver for rolling windows analysis model."""
import pandas as pd
from enum import Enum
from typing import NamedTuple, Optional, List
from lexos.receivers.base_receiver import BaseReceiver
from lexos.managers.utility import load_file_manager
class RWATokenType(Enum):
"""This type specify what kind of token (or term) to find in a window."""
string = "string"
regex = "regex"
word = "word"
class WindowUnitType(Enum):
"""This type specify what is the unit of each window.
Say it is letter, each window consist of `window_size` number of letters.
"""
letter = "letter"
word = "word"
line = "line"
class RWAWindowOptions(NamedTuple):
"""The options related to window creation."""
# The size of the window.
window_size: int
# The unit of the window, see WindowUnitType for more detail.
window_unit: WindowUnitType
class RWARatioTokenOptions(NamedTuple):
"""The option if you choose to count by ratio."""
# The type of the token, see RWATokenType for more detail.
token_type: RWATokenType
# The frame saves token count as list of numerator and token count as list
# of denominator.
token_frame: pd.DataFrame
class RWAAverageTokenOptions(NamedTuple):
"""The options if you choose to count by average."""
# The type of the token, see RWATokenType for more detail.
token_type: RWATokenType
# A list of tokens to count.
tokens: List[str]
class RWAPlotOptions(NamedTuple):
"""The option for adjusting plotly result."""
# Show individual points if true.
individual_points: bool
# Return plot in black-white scale if true.
black_white: bool
class RWAFrontEndOptions(NamedTuple):
"""All the options to get from the front end."""
# The options if you choose ratio count,
# it will be None if you did not choose ratio.
ratio_token_options: Optional[RWARatioTokenOptions]
# The option if you choose average count
# it will be None if you did not choose Average.
average_token_options: Optional[RWAAverageTokenOptions]
# The id of the passage to run rolling window.
passage_file_id: int
# The setting related to the windows.
window_options: RWAWindowOptions
# The settings related to the plot result.
plot_options: RWAPlotOptions
# A milestone, it is none if it is not given from frontend.
    milestone: Optional[List[str]]
# The color to use
text_color: str
class RollingWindowsReceiver(BaseReceiver):
"""Get all the options to generate rolling windows result."""
def _get_ratio_token_options(self) -> RWARatioTokenOptions:
"""Get all the options to generate ratio count."""
raw_numerator = self._front_end_data['search_term']
raw_denominator = self._front_end_data['search_term_denominator']
if self._front_end_data['input_type'] == 'Strings':
token_type = RWATokenType.string
numerator_token = raw_numerator.split(",")
denominator_token = raw_denominator.split(",")
elif self._front_end_data['input_type'] == 'Regex':
token_type = RWATokenType.regex
numerator_token = raw_numerator.split(",")
denominator_token = raw_denominator.split(",")
elif self._front_end_data['input_type'] == 'Words':
token_type = RWATokenType.word
numerator_token = [token.strip()
for token in raw_numerator.split(",")]
denominator_token = [token.strip()
for token in raw_denominator.split(",")]
else:
raise ValueError("invalid token type from front end")
# Pack data in a data frame.
token_frame = pd.DataFrame(
data={
"numerator": numerator_token,
"denominator": denominator_token,
}
)
return RWARatioTokenOptions(token_type=token_type,
token_frame=token_frame)
def _get_average_token_options(self) -> RWAAverageTokenOptions:
"""Get all the options to generate average count."""
# the unprocessed token
raw_token = self._front_end_data['search_term']
if self._front_end_data['input_type'] == 'Strings':
token_type = RWATokenType.string
tokens = raw_token.split(',')
elif self._front_end_data['input_type'] == 'Regex':
token_type = RWATokenType.regex
tokens = raw_token.split(',')
elif self._front_end_data['input_type'] == 'Words':
token_type = RWATokenType.word
tokens = [token.strip() for token in raw_token.split(',')]
else:
raise ValueError("invalid token type from front end")
return RWAAverageTokenOptions(token_type=token_type, tokens=tokens)
def _get_window_option(self) -> RWAWindowOptions:
"""Get all the option for windows."""
if self._front_end_data['window_type'] == 'Characters':
window_unit = WindowUnitType.letter
elif self._front_end_data['window_type'] == 'Words':
window_unit = WindowUnitType.word
elif self._front_end_data['window_type'] == 'Lines':
window_unit = WindowUnitType.line
else:
raise ValueError("invalid window unit from front end")
window_size = int(self._front_end_data['window_size'])
return RWAWindowOptions(window_size=window_size,
window_unit=window_unit)
def _get_milestone(self) -> Optional[List[str]]:
"""Get the milestone string from front end and split it into words."""
if 'enable_milestone' not in self._front_end_data:
return None
else:
raw_mile_stones = self._front_end_data['milestone']
return [mile_stone.strip()
for mile_stone in raw_mile_stones.split(",")]
def _get_passage_file_id(self) -> int:
"""Get the file id for the passage to run rolling window."""
return load_file_manager().get_active_files()[0].id
def _get_plot_option(self) -> RWAPlotOptions:
"""Get the plot option from front end."""
individual_points = True if 'show_points' \
in self._front_end_data else False
black_white = True if 'black_and_white' \
in self._front_end_data else False
return RWAPlotOptions(individual_points=individual_points,
black_white=black_white)
def options_from_front_end(self) -> RWAFrontEndOptions:
"""Pack all the front end options together."""
if self._front_end_data['calculation_type'] == 'Rolling Ratio':
return RWAFrontEndOptions(
average_token_options=None,
ratio_token_options=self._get_ratio_token_options(),
window_options=self._get_window_option(),
plot_options=self._get_plot_option(),
milestone=self._get_milestone(),
passage_file_id=self._get_passage_file_id(),
text_color=self._front_end_data["text_color"]
)
elif self._front_end_data['calculation_type'] == 'Rolling Average':
return RWAFrontEndOptions(
average_token_options=self._get_average_token_options(),
ratio_token_options=None,
window_options=self._get_window_option(),
plot_options=self._get_plot_option(),
milestone=self._get_milestone(),
passage_file_id=self._get_passage_file_id(),
text_color=self._front_end_data["text_color"]
)
else:
raise ValueError("invalid count type from front end")
| mit | 428,797,673,821,050,750 | 34.981567 | 78 | 0.615907 | false |
alexandresobolevski/yahoo_ff | yahoo_ff/tools/scrapingTools.py | 1 | 2687 | from urllib.request import urlopen
import time
import numpy as np
BASE_URL = 'https://ca.finance.yahoo.com/'
powers = {'%': 10 ** (-2), 'M': 10 ** 6, 'B': 10 ** 9, 'T': 10 ** 12}
def getUnixTime (dateTime):
return int(time.mktime(dateTime.timetuple()))
def parse_powers(x):
power = x[-1]
if (power in powers.keys()):
return float(x[:-1]) * powers[power]
else :
return x
def float_or_none(x):
x = x.replace(',','')
try:
# if negative value (1000)
if x[0]=='(' and x[-1]==')':
            return -float(x[1:-1])  # strip only the surrounding parentheses, e.g. (1234) -> -1234.0
else:
return float(x)
except: return None
def scrape_report(source_code, information):
return parse_table(find_section(source_code, information))
def get_annual_is_url(stock):
return BASE_URL + '/q/is?s=' + stock + '&annual'
def get_quarterly_is_url(stock):
return BASE_URL + '/q/is?s=' + stock
def get_annual_bs_url(stock):
return BASE_URL + '/q/bs?s=' + stock + '&annual'
def get_quarterly_bs_url(stock):
return BASE_URL + '/q/bs?s=' + stock
def get_annual_cf_url(stock):
return BASE_URL + '/q/cf?s=' + stock + '&annual'
def get_quarterly_cf_url(stock):
return BASE_URL + '/q/cf?s=' + stock
def get_stockinfo_url(stock):
return BASE_URL + '/q/pr?s=' + stock + '+Profile'
def get_keystats_url(stock):
return BASE_URL + '/q/ks?s=' + stock
def get_source_code(url):
return urlopen(url).read().decode()
def parse_table(source_code):
source_code = source_code.split('</td></tr>')[0]
source_code = source_code.replace('<strong>', '')
source_code = source_code.replace('</strong>', '')
source_code = source_code.replace('\n', '')
source_code = source_code.replace(' ', '')
source_code = source_code.replace('<td align="right">','')
source_code = source_code.replace(' ', '')
source_code = source_code.split('</td>')
source_code = filter(None, source_code)
return [float_or_none(x.replace(',', '')) for x in source_code]
def find_section(source_code, section_name):
try:
return source_code.split(section_name)[1]
except:
print('failed acquiring ' + section_name)
def scrape_company_infos(source_code, field):
return [source_code.split(field+':')[1].split('</td>')[1].replace('</a>','').split('>')[-1]]
def scrape_key_stats(source_code, field):
try:
return [parse_powers(source_code.split(field)[1].split('</td></tr>')[0].replace('</span>', '').split('>')[-1])]
except:
return [np.nan]
def get_current_price(source_code):
return {'Price': [float_or_none(source_code.split('time_rtq_ticker')[1].split('span')[1].split('>')[1].split('<')[0])]}
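if __name__ == '__main__':
    # Quick sanity checks for the pure parsing helpers above; a sketch only,
    # no network access or Yahoo endpoints involved.
    print(parse_powers('2.5M'))      # 2500000.0
    print(parse_powers('15%'))       # 0.15
    print(float_or_none('1,234.5'))  # 1234.5
    print(float_or_none('n/a'))      # None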
| mit | 3,454,508,166,530,037,000 | 29.885057 | 123 | 0.599181 | false |
totoro-zhang/hello-word | spider/distributespider/myfirstSpider_URLManager.py | 1 | 2465 | #coding:utf-8
import pickle
import hashlib
class UrlManager(object):
def __init__(self):
#self.new_urls = set()
#self.old_urls = set()
        self.new_urls = self.load_progress('new_urls.txt')  # set of URLs not yet crawled
        self.old_urls = self.load_progress('old_urls.txt')  # set of (hashed) URLs already crawled
def has_new_url(self):
'''
        Check whether there are any uncrawled URLs.
:return:
'''
return self.new_url_size() != 0
def get_new_url(self):
'''
        Get one uncrawled URL and record its hash as crawled.
:return:
'''
new_url = self.new_urls.pop()
m = hashlib.md5()
m.update(new_url.encode("utf8"))
self.old_urls.add(m.hexdigest()[8:-8])
return new_url
def add_new_url(self,url):
'''
        Add a single new URL to the set of uncrawled URLs.
:return:
'''
if url is None:
return
m = hashlib.md5()
m.update(url.encode("utf8"))
url_md5 = m.hexdigest()[8:-8]
if url not in self.new_urls and url_md5 not in self.old_urls:
self.new_urls.add(url)
def add_new_urls(self,urls):
'''
        Add a batch of new URLs to the set of uncrawled URLs.
        :param urls: an iterable of URLs
:return:
'''
if urls is None or len(urls)== 0 :
return
for url in urls:
self.add_new_url(url)
def new_url_size(self):
'''
        Get the number of uncrawled URLs.
:return:
'''
return len(self.new_urls)
def old_url_size(self):
'''
        Get the number of already-crawled URLs.
:return:
'''
return len(self.old_urls)
    def save_progress(self, path, data):
        '''
        Save crawl progress to a local file.
        :param path: file path
        :param data: the object to pickle
        :return:
        '''
        with open(path,'wb') as f:
            pickle.dump(data, f)
def load_progress(self,path):
'''
        Load previously saved crawl progress from a local file.
        :param path: file path
        :return: the loaded set (a new empty set if the file does not exist)
'''
        print('[+] Loading progress from file: %s' % path)
        try:
            with open(path,'rb') as f:
                tmp = pickle.load(f)
                return tmp
        except:
            print('[!] No progress file found, creating: %s' % path)
return set()
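if __name__ == '__main__':
    # Minimal usage sketch: the constructor already tries to load new_urls.txt and
    # old_urls.txt from the working directory; the URL below is a placeholder.
    manager = UrlManager()
    manager.add_new_url('http://example.com/page1')
    while manager.has_new_url():
        print(manager.get_new_url())
    manager.save_progress('new_urls.txt', manager.new_urls)
    manager.save_progress('old_urls.txt', manager.old_urls)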
| unlicense | -7,736,062,366,020,673,000 | 22.833333 | 69 | 0.46264 | false |
leon-adams/datascience | algorithms/k_nearest_neighbor.py | 1 | 4659 | import numpy as np
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
for j in xrange(num_train):
# Compute the l2 distance between the ith test point and the jth training point
dists[i, j] = np.sqrt( np.sum((X[i] - self.X_train[j])**2) )
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
dists[i, :] = np.sqrt( np.sum( (X[i] - self.X_train)**2, axis=1 ) )
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
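        # Vectorized L2 distance: expand ||x - y||^2 = ||x||^2 - 2*x.y + ||y||^2 and
        # let broadcasting form the full (num_test, num_train) matrix in one shot.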
test_sq = np.sum(X**2, axis=1, keepdims=True)
train_sq = np.sum(self.X_train**2, axis=1)
cross = np.dot(X, self.X_train.transpose())
dists = np.sqrt(test_sq -2*cross + train_sq)
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance betwen the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in xrange(num_test):
#########################################################################
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. #
distances_from_i_test = dists[i].argsort()
closest_y = self.y_train[distances_from_i_test][:k]
closest_y.sort()
(distance, count) = np.unique(closest_y, return_counts=True)
ind = np.argmax(count)
y_pred[i] = distance[ind]
return y_pred
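if __name__ == '__main__':
    # Minimal smoke test, a sketch only (tiny synthetic data; NumPy is assumed to be
    # the sole dependency, as in the class above).
    X_train = np.array([[0., 0.], [1., 1.], [5., 5.]])
    y_train = np.array([0, 0, 1])
    knn = KNearestNeighbor()
    knn.train(X_train, y_train)
    print(knn.predict(np.array([[0.9, 1.1]]), k=1, num_loops=0))  # predicted labels, here [ 0.]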
| mpl-2.0 | 1,120,641,967,713,897,000 | 33.768657 | 85 | 0.623095 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storagesync/azure/mgmt/storagesync/aio/operations/_operation_status_operations.py | 1 | 5447 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationStatusOperations:
"""OperationStatusOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storagesync.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
location_name: str,
workflow_id: str,
operation_id: str,
**kwargs
) -> "_models.OperationStatus":
"""Get Operation status.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param location_name: The desired region to obtain information from.
:type location_name: str
:param workflow_id: workflow Id.
:type workflow_id: str
:param operation_id: operation Id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationStatus, or the result of cls(response)
:rtype: ~azure.mgmt.storagesync.models.OperationStatus
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
'workflowId': self._serialize.url("workflow_id", workflow_id, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageSyncError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-correlation-request-id']=self._deserialize('str', response.headers.get('x-ms-correlation-request-id'))
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/locations/{locationName}/workflows/{workflowId}/operations/{operationId}'} # type: ignore
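# Not called directly: the storagesync management client wires this operation group
# up as an attribute. A hedged sketch (the client variable, resource names and the
# exact attribute name are assumptions; verify against the installed SDK version):
#   status = await client.operation_status.get(
#       resource_group_name="my-rg", location_name="westeurope",
#       workflow_id="wf-id", operation_id="op-id")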
| mit | 5,413,257,254,874,317,000 | 48.072072 | 217 | 0.665504 | false |
thomasyu888/synapsePythonClient | setup.py | 1 | 4360 | # Installation script for Synapse Client for Python
############################################################
import sys
import os
import platform
import setuptools
import json
# check Python version, before we do anything
if sys.version_info < (3, 6):
sys.stderr.write("The Synapse Client for Python requires Python 3.6+\n")
sys.stderr.write("Your Python appears to be version %d.%d.%d\n" % sys.version_info[:3])
sys.exit(-1)
# figure out the version
__version__ = json.loads(open('synapseclient/synapsePythonClient').read())['latestVersion']
description = """A client for Synapse, a collaborative compute space
that allows scientists to share and analyze data together.""".replace("\n", " ")
with open("README.md", "r") as fh:
long_description = fh.read()
# make sure not to overwrite existing .synapseConfig with our example one
data_files =\
[(os.path.expanduser('~'), ['synapseclient/.synapseConfig'])]\
if not os.path.exists(os.path.expanduser('~/.synapseConfig'))\
else []
test_deps = [
"pytest>=5.0.0,<7.0",
"pytest-mock>=3.0,<4.0",
"flake8>=3.7.0,<4.0",
"pytest-xdist[psutil]>=2.2,<3.0.0",
]
install_requires = [
'requests>=2.22.0,<3.0',
'keyring==12.0.2',
'deprecated>=1.2.4,<2.0',
]
# on Linux specify a cryptography dependency that will not
# require a Rust compiler to compile from source (< 3.4).
# on Linux cryptography is a transitive dependency
# (keyring -> SecretStorage -> cryptography)
# SecretStorage doesn't pin a version so otherwise if cryptography
# is not already installed the dependency will resolve to the latest
# and will require Rust if a precompiled wheel cannot be used
# (e.g. old version of pip or no wheel available for an architecture).
# if a newer version of cryptography is already installed that is
# fine we don't want to trigger a downgrade, hence the conditional
# addition of the versioned dependency.
if platform.system() == 'Linux':
try:
import cryptography # noqa
# already installed, don't need to install (or downgrade)
except ImportError:
install_requires.append('cryptography<3.4')
setuptools.setup(
# basic
name='synapseclient',
version=__version__,
packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
# requirements
python_requires='>=3.6.*',
install_requires=install_requires,
extras_require={
'pandas': ["pandas>=0.25.0,<2.0"],
'pysftp': ["pysftp>=0.2.8,<0.3"],
'boto3': ["boto3>=1.7.0,<2.0"],
'docs': ["sphinx>=3.0,<4.0", "sphinx-argparse>=0.2,<0.3"],
'tests': test_deps,
':sys_platform=="linux"': ['keyrings.alt==3.1'],
},
# command line
entry_points={
'console_scripts': ['synapse = synapseclient.__main__:main']
},
# data
package_data={'synapseclient': ['synapsePythonClient', '.synapseConfig']},
data_files=data_files,
zip_safe=False,
# test
tests_require=test_deps,
# metadata to display on PyPI
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
url='https://www.synapse.org',
author='The Synapse Engineering Team',
author_email='[email protected]',
license='Apache',
project_urls={
"Documentation": "https://python-docs.synapse.org",
"Source Code": "https://github.com/Sage-Bionetworks/synapsePythonClient",
"Bug Tracker": "https://github.com/Sage-Bionetworks/synapsePythonClient/issues",
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Operating System :: POSIX :: Linux',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'],
)
| apache-2.0 | 8,091,647,534,263,639,000 | 34.737705 | 91 | 0.642431 | false |
moden-py/SWAPY | swapy-ob.py | 1 | 1727 | # GUI object/properties browser.
# Copyright (C) 2011 Matiychuk D.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
#Boa:App:BoaApp
import sys
import traceback
import wx
import _mainframe
import tools
def hook(exctype, value, tb):
"""
Handle all unexpected exceptions. Show the msgbox then close main window.
"""
traceback_text = ''.join(traceback.format_exception(exctype, value, tb, 5))
tools.show_error_message('ERROR', traceback_text)
if not __debug__:
# Catch all unhandled exceptions and show the details in a msgbox.
sys.excepthook = hook
modules ={'_mainframe': [0, '', '_mainframe.py'], 'proxy': [0, '', 'proxy.py']}
class BoaApp(wx.App):
def OnInit(self):
self.main = _mainframe.create(None)
self.main.Center()
self.main.Show()
self.SetTopWindow(self.main)
return True
def main():
application = BoaApp(0)
application.MainLoop()
if __name__ == '__main__':
main()
| lgpl-2.1 | -9,084,540,459,404,032,000 | 25.412698 | 79 | 0.665316 | false |
blindtex/blindtex | blindtex/iotools/iotools.py | 1 | 5776 | #-*-:coding:utf-8-*-
import os
import copy
import json
import string
import subprocess
#from blindtex import mainBlindtex
import sys
from sys import argv
#HU1
#Method to open a file and return its content as a string.
def openFile(fileName):
    '''This function takes a file and returns its content as a string.
    Args:
        fileName(str): The name of the file to be opened.
Returns:
str: The content of the file.'''
try:
myFile = open(fileName)
stringDocument = myFile.read()
myFile.close()
return stringDocument
except IOError:
print("File %s could not be openned."%(fileName))
return ""
#EndOfFunction
def read_json_file(fileName):
'''This function takes a file a return its content as a string.
Args:
fileName(str): The name of the file to be oppened.
Returns:
str: The content of the file.'''
try:
with open(fileName,'r') as myFile:
stringDocument = json.load(myFile)
except OSError as err:
print("OS error: {0}".format(err))
raise
except ValueError:
print("Could not parser",fileName,"file, please check json syntax.")
raise
except:
print("Unexpected error:", sys.exc_info()[0])
raise
return stringDocument
#Replace the document containing the LaTeX math with the output of the function seekAndReplace. Write the content in a new file.
def replaceAndWrite(contentList, replacedDocument, fileName):
'''Replace the document containing the LaTeX math with the output of the function seekAndReplace. Write the content in a new file.
Args:
contentList(list[str,str,str]): The list generated by extractContent.
replacedDocument(str): the LaTeX content without formulas, just markers.
fileName(str): The name of the .tex file where the result will be written. '''
newContentList = copy.deepcopy(contentList)
newContentList[1] = replacedDocument
try:
        myFile = open(fileName, 'w')#TODO Check if the file already exists, warn about that and decide if the user wants to replace it.
myFile.write(string.join(newContentList))
myFile.close()
except IOError:
print("File could not be oppened.")
return
#EndOfFunction
def convertToHtml(fileName, biblioName=None):
'''This function uses LaTeXML to convert a .tex file in a html with accesible math formulas.
Args:
fileName(str): the name of the .tex file to be processed.
(opt)biblioName(str): the name o a .bib file. '''
noExtensionName = fileName.replace(".tex","")
if(biblioName):
if(os.name == 'nt'): #i.e is in windows
noExtensionBiblio = biblioName.replace(".bib","")
subprocess.call(["latexml","--dest=%s.xml"%(noExtensionName),"--quiet",fileName], shell=True)
subprocess.call(["latexml", "--dest=%s.xml"%(noExtensionBiblio),"--bibtex", biblioName], shell= True)
subprocess.call(["latexmlpost","-dest=%s.xhtml"%(noExtensionName),"--bibliography=%s.xml"%(noExtensionBiblio),noExtensionName+".xml"], shell=True)
else: #TODO: Do not repeat
noExtensionBiblio = biblioName.replace(".bib","")
subprocess.call(["latexml","--dest=%s.xml"%(noExtensionName),"--quiet",fileName])
subprocess.call(["latexml", "--dest=%s.xml"%(noExtensionBiblio),"--bibtex", biblioName])
subprocess.call(["latexmlpost","-dest=%s.xhtml"%(noExtensionName),"--bibliography=%s.xml"%(noExtensionBiblio),noExtensionName+".xml"])
else:
if(os.name == 'nt'):
subprocess.call(["latexml","--dest=%s.xml"%(noExtensionName),"--quiet",fileName], shell = True)#Generates xml file.
subprocess.call(["latexmlpost","-dest=%s.xhtml"%(noExtensionName),noExtensionName+".xml"], shell = True)#Generates xhtml file.
else:
subprocess.call(["latexml","--dest=%s.xml"%(noExtensionName),"--quiet",fileName])#Generates xml file.
subprocess.call(["latexmlpost","-dest=%s.xhtml"%(noExtensionName),noExtensionName+".xml"])#Generates xhtml file.
#EndOfFunction
def convertToPdf(filePath,fileName):
if(os.name == 'nt'):
subprocess.call(['pdflatex','-output-directory',filePath, fileName], shell = True)
subprocess.call(['pdflatex','-output-directory',filePath, fileName], shell = True)
else:
subprocess.call(['pdflatex','-output-directory',filePath, fileName])
subprocess.call(['pdflatex','-output-directory',filePath, fileName])
#EndOfFunction
#TODO: with some extension, or is the extension given from outside?
def writeHtmlFile(htmlString, fileName):
'''Function to write the html result in a final file.
Args:
htmlString(str): The string with the html content of the final result.
fileName(str): The name of the file where the string will be written. '''
try:
htmlFile = open(fileName,'w')
htmlFile.write(htmlString)
htmlFile.close()
except IOError:
        print('File could not be opened.')
return
#EndOf Function
#This function works just when a .tex file is being converted.
def writeTroubles(strfileName, listtroubleFormulas):
(filePath, name) = os.path.split(strfileName)
try:
registerFile = open(os.path.join(filePath, 'TroubleFormulasOf'+name.replace('.tex','.txt')),'w')
for formula in listtroubleFormulas:
registerFile.write('I had troubles with:\n'+formula+'\n')
registerFile.close()
except IOError:
return
#EndOfFunction
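# Typical flow sketch (assumes LaTeXML and pdflatex are installed and on PATH; file
# names are illustrative, and contentList/replacedDocument would come from blindtex's
# extraction/replacement steps, which live outside this module):
#   raw = openFile('paper.tex')
#   # ... extract and replace formulas elsewhere in blindtex ...
#   replaceAndWrite(contentList, replacedDocument, 'paper_replaced.tex')
#   convertToHtml('paper_replaced.tex', biblioName='refs.bib')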
| gpl-3.0 | -1,899,846,381,258,995,200 | 39.65493 | 158 | 0.639182 | false |
fanchao01/pythontools | Queue.py | 1 | 4492 | #!/bin/env python
#-*- encoding: utf-8 -*-
__author__ = "fanchao01"
__version__ = "0.0.1"
'''A multi-threaded queue in the style of the standard library's Queue.Queue.'''
import threading as _threading
import time as _time
class Full(Exception):
"""Exception Full raised by Queue.put/put_nowait"""
class Empty(Exception):
"""Exception Empty raised by Queue.get/get_nowait"""
class Queue(object):
def __init__(self, maxsize=0):
self.maxsize = maxsize
self.queue = []
#one lock with three condition-waiting queue
self.mutex = _threading.Lock()
self.not_full = _threading.Condition(self.mutex)
self.not_empty = _threading.Condition(self.mutex)
self.all_tasks_done = _threading.Condition(self.mutex)
self.un_finished_tasks = 0
def clear(self):
with self.mutex as lock:
self.queue = []
self.not_full.notify_all()
def task_done(self):
with self.all_tasks_done as condition:
unfinished = self.un_finished_tasks - 1
if unfinished < 0:
raise ValueError("task_done() called too many times")
elif unfinished == 0:
self.all_tasks_done.notify_all()
self.un_finished_tasks = unfinished
def join(self):
with self.all_tasks_done as condition:
while self.un_finished_tasks > 0:
self.all_tasks_done.wait()
def qsize(self):
with self.mutex as lock:
return self._qsize()
def _qsize(self): #there must be a way to get the size of self.queue without lock
return len(self.queue)
    def full(self):
        with self.mutex as lock:
            # use _qsize(): calling qsize() here would re-acquire self.mutex and deadlock
            return self._qsize() >= self.maxsize if self.maxsize > 0 else False
    def empty(self):
        with self.mutex as lock:
            return self._qsize() <= 0
def _put(self, ele):
self.queue.append(ele)
self.un_finished_tasks += 1
def put(self, ele, block=True, timeout=None):
with self.not_full as condition:
if self.maxsize > 0:
if not block:
                    if self._qsize() >= self.maxsize: #can not use self.qsize(), which will relock the self.mutex leading to deadlock
raise Full
elif timeout is None:
while self._qsize() >= self.maxsize:
self.not_full.wait()
elif timeout < 0:
raise ValueError("timeout must be >0, given(%d)" % timeout)
else:
end = _time.time() + timeout
while self._qsize() >= self.maxsize:
remaining = end - _time.time()
if remaining < 0.0:
raise Full
self.not_full.wait(remaining)
self._put(ele)
self.not_empty.notify()
def put_nowait(self, ele):
self.put(ele, False)
def _get(self):
return self.queue.pop(0)
def get(self, block=True, timeout=None):
with self.not_empty as condition:
if not block:
if self._qsize() == 0:
raise Empty
elif timeout is None:
while self._qsize() == 0:
self.not_empty.wait()
elif timeout < 0:
raise ValueError("timeout must be > 0, given(%d)" % timeout)
else:
end = _time.time() + timeout
while self._qsize() == 0:
remaining = end - _time.time()
if remaining < 0.0:
raise Empty
self.not_empty.wait(remaining)
ele = self._get()
self.not_full.notify()
return ele
    def get_nowait(self):
        return self.get(False)
if __name__ == "__main__":
import random
import time
class Worker(_threading.Thread):
def __init__(self, queue):
super(Worker, self).__init__()
self.queue = queue
def run(self):
time.sleep(random.randint(1, 5) / 10.0)
print self.queue.get()
q = Queue(10)
for i in range(10):
q.put(i)
try:
q.put(11, True, 1)
except Full:
pass
try:
q.put_nowait(11)
except Full:
pass
for i in range(10):
Worker(q).start()
q.task_done()
w = Worker(q)
w.start()
q.put(10)
| gpl-2.0 | 4,892,882,358,093,603,000 | 27.43038 | 134 | 0.511131 | false |
FilWisher/distributed-project | icarus/icarus/results/visualize.py | 1 | 3641 | """Functions for visualizing results on graphs of topologies"""
from __future__ import division
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
__all__ = [
'draw_stack_deployment',
'draw_network_load',
]
# Colormap for node stacks
COLORMAP = {'source': 'blue',
'receiver': 'green',
'router': 'white',
'cache': 'red',
}
def stack_map(topology):
"""Return dict mapping node ID to stack type
Parameters
----------
topology : Topology
The topology
Returns
-------
stack_map : dict
Dict mapping node to stack. Options are:
source | receiver | router | cache
"""
stack = {}
for v, (name, props) in topology.stacks().items():
if name == 'router':
cache = False
if 'cache_size' in props and props['cache_size'] > 0:
cache = True
            if cache:
name = 'cache'
else:
name = 'router'
stack[v] = name
return stack
def draw_stack_deployment(topology, filename, plotdir):
"""Draw a topology with different node colors according to stack
Parameters
----------
topology : Topology
The topology to draw
plotdir : string
The directory onto which draw plots
filename : string
The name of the image file to save
"""
stack = stack_map(topology)
node_color = [COLORMAP[stack[v]] for v in topology.nodes_iter()]
plt.figure()
nx.draw_graphviz(topology, node_color=node_color, with_labels=False)
    plt.savefig(os.path.join(plotdir, filename), bbox_inches='tight')
def draw_network_load(topology, result, filename, plotdir):
"""Draw topology with node colors according to stack and node size and link
color according to server/cache hits and link loads.
Nodes are colored according to COLORMAP. Edge are colored on a blue-red
scale where blue means min link load and red means max link load.
Sources and caches have variable size proportional to their hit ratios.
Parameters
----------
topology : Topology
The topology to draw
result : Tree
The tree representing the specific experiment result from which metric
are read
plotdir : string
The directory onto which draw plots
filename : string
The name of the image file to save
"""
stack = stack_map(topology)
node_color = [COLORMAP[stack[v]] for v in topology.nodes_iter()]
node_min = 50
node_max = 600
hits = result['CACHE_HIT_RATIO']['PER_NODE_CACHE_HIT_RATIO'].copy()
hits.update(result['CACHE_HIT_RATIO']['PER_NODE_SERVER_HIT_RATIO'])
hits = np.array([hits[v] if v in hits else 0 for v in topology.nodes_iter()])
min_hits = np.min(hits)
max_hits = np.max(hits)
hits = node_min + (node_max - node_min)*(hits - min_hits)/(max_hits - min_hits)
link_load = result['LINK_LOAD']['PER_LINK_INTERNAL'].copy()
link_load.update(result['LINK_LOAD']['PER_LINK_EXTERNAL'])
link_load = [link_load[e] if e in link_load else 0 for e in topology.edges()]
plt.figure()
nx.draw_graphviz(topology, node_color=node_color, node_size=hits,
width=2.0,
edge_color=link_load,
edge_cmap=mpl.colors.LinearSegmentedColormap.from_list('bluered',['blue','red']),
with_labels=False)
    plt.savefig(os.path.join(plotdir, filename), bbox_inches='tight')
| mit | -1,813,796,021,012,025,600 | 31.508929 | 102 | 0.608075 | false |
jessada/pyCMM | setup.py | 1 | 4017 | import sys
import glob
import pkgutil
import os
import fnmatch
from setuptools import setup
from pycmm.settings import DNASEQ_SLURM_MONITOR_PIPELINE_BIN
from pycmm.settings import DUMMY_TABLE_ANNOVAR_BIN
from pycmm.settings import MUTREP_SLURM_MONITOR_PIPELINE_BIN
from pycmm.settings import MUTREP_FAMILY_REPORT_BIN
from pycmm.settings import MUTREP_SUMMARY_REPORT_BIN
from pycmm.settings import MUTREPDB_SEQ_REPORT_BIN
from pycmm.settings import PLINK_SLURM_MONITOR_PIPELINE_BIN
from pycmm.settings import PLINK_HAP_ASSOCS_REPORT_BIN
from pycmm.settings import PLINK_MERGE_HAP_ASSOCS_BIN
from pycmm.settings import DBMS_EXECUTE_DB_JOBS_BIN
def opj(*args):
path = os.path.join(*args)
return os.path.normpath(path)
def find_data_files(srcdir, *wildcards, **kw):
# get a list of all files under the srcdir matching wildcards,
# returned in a format to be used for install_data
def walk_helper(arg, dirname, files):
if '.svn' in dirname:
return
names = []
lst, wildcards = arg
for wc in wildcards:
wc_name = opj(dirname, wc)
for f in files:
filename = opj(dirname, f)
if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):
names.append(filename)
if names:
lst.append( (dirname, names ) )
file_list = []
recursive = kw.get('recursive', True)
if recursive:
os.path.walk(srcdir, walk_helper, (file_list, wildcards))
else:
walk_helper((file_list, wildcards),
srcdir,
[os.path.basename(f) for f in glob.glob(opj(srcdir, '*'))])
return file_list
#csv_files = find_data_files('data/', '*.csv')
all_data_files = find_data_files('data/', '*.*')
#all_data_files = find_data_files('script/', '*.*')
setup(
name='pyCMM',
version='0.0.1',
author='Jessada Thutkawkorapin',
author_email='[email protected]',
packages=['pycmm',
'pycmm.app',
'pycmm.utils',
'pycmm.cmmlib',
'pycmm.flow',
'pycmm.proc',
'pycmm.proc.db',
'pycmm.proc.mutrep',
],
scripts=['bin/'+DNASEQ_SLURM_MONITOR_PIPELINE_BIN,
'bin/pyCMM-dnaseq-pipeline',
'bin/pyCMM-dnaseq-create-job-setup-file',
'bin/pyCMM-cmmdb-cal-mut-stat',
'bin/pyCMM-cmmdb-vcf-AF-to-annovar',
'bin/pyCMM-cmmdb-table-annovar',
'bin/pyCMM-cmmdb-create-job-setup-file',
'bin/'+DUMMY_TABLE_ANNOVAR_BIN,
'bin/'+MUTREP_SLURM_MONITOR_PIPELINE_BIN,
'bin/pyCMM-mutrep-pipeline',
'bin/pyCMM-mutrep-mutation-reports',
'bin/'+MUTREP_FAMILY_REPORT_BIN,
'bin/'+MUTREP_SUMMARY_REPORT_BIN,
'bin/pyCMM-mutrep-create-job-setup-file',
'bin/pyCMM-mutrepdb-create-job-setup-file',
'bin/'+MUTREPDB_SEQ_REPORT_BIN,
'bin/pyCMM-mutrepdb-controller',
'bin/pyCMM-plink-create-job-setup-file',
'bin/pyCMM-plink-pipeline',
'bin/'+PLINK_SLURM_MONITOR_PIPELINE_BIN,
'bin/'+PLINK_HAP_ASSOCS_REPORT_BIN,
'bin/'+PLINK_MERGE_HAP_ASSOCS_BIN,
'bin/pyCMM-dbms-controller',
'bin/pyCMM-dbms-create-job-setup-file',
'bin/'+DBMS_EXECUTE_DB_JOBS_BIN,
],
package=['pyCMM'],
# package_data={'': ['data/CBV/*.cbv']
# },
data_files=all_data_files,
url='http://pypi.python.org/pypi/pyCMM/',
license='LICENSE.txt',
description='Python packages for my sequencing data analysis at Center of Molecular Medicine, Karolinska Institute, Stockholm, Sweden',
long_description=open('README.md').read(),
install_requires=[
"pysam >= 0.7",
"pyvcf >= 0.6.0",
"pyaml >= 15.5.7",
"openpyxl >= 2.3.3",
"xlsxwriter >= 0.5.3",
],
)
| gpl-2.0 | -6,294,524,803,636,695,000 | 35.853211 | 139 | 0.591735 | false |
bowen0701/algorithms_data_structures | lc0240_search_a_2d_matrix_ii.py | 1 | 1900 | """Leetcode 240. Search a 2D Matrix II
URL: https://leetcode.com/problems/search-a-2d-matrix-ii/
Medium
Write an efficient algorithm that searches for a value in an m x n matrix.
This matrix has the following properties:
- Integers in each row are sorted in ascending from left to right.
- Integers in each column are sorted in ascending from top to bottom.
Example:
Consider the following matrix:
[
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
Given target = 5, return true.
Given target = 20, return false.
"""
class Solution(object):
def searchMatrix(self, matrix, target):
"""
:type matrix: List[List[int]]
:type target: int
:rtype: bool
Time complexity: O(m+n), where
- m is the row number, and
- n is the column number.
Space complexity: O(1).
"""
if not len(matrix) or not len(matrix[0]):
return False
# Search starting from the bottom-left, moving to top/right.
i, j = len(matrix) - 1, 0
while i >= 0 and j < len(matrix[0]):
if matrix[i][j] == target:
return True
elif matrix[i][j] > target:
# If entry is bigger than target, decrease next entry.
i -= 1
elif matrix[i][j] < target:
# If entry is smaller than target, increase next entry.
j += 1
return False
def main():
matrix = [
[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]
]
target = 5 # Should be True.
print Solution().searchMatrix(matrix, target)
target = 20 # Should be False.
print Solution().searchMatrix(matrix, target)
if __name__ == '__main__':
main()
| bsd-2-clause | -1,983,297,366,930,088,000 | 24.675676 | 75 | 0.543158 | false |
angadpc/Alexa-Project- | twilio/rest/chat/v1/service/__init__.py | 1 | 49759 | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.chat.v1.service.channel import ChannelList
from twilio.rest.chat.v1.service.role import RoleList
from twilio.rest.chat.v1.service.user import UserList
class ServiceList(ListResource):
def __init__(self, version):
"""
Initialize the ServiceList
:param Version version: Version that contains the resource
:returns: twilio.rest.ip_messaging.v1.service.ServiceList
:rtype: twilio.rest.ip_messaging.v1.service.ServiceList
"""
super(ServiceList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Services'.format(**self._solution)
def create(self, friendly_name):
"""
Create a new ServiceInstance
:param unicode friendly_name: The friendly_name
:returns: Newly created ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ServiceInstance(
self._version,
payload,
)
def stream(self, limit=None, page_size=None):
"""
Streams ServiceInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.ServiceInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists ServiceInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.ip_messaging.v1.service.ServiceInstance]
"""
return list(self.stream(
limit=limit,
page_size=page_size,
))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ServiceInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServicePage
"""
params = values.of({
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ServicePage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ServiceContext
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.ServiceContext
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
return ServiceContext(
self._version,
sid=sid,
)
def __call__(self, sid):
"""
Constructs a ServiceContext
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.ServiceContext
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
return ServiceContext(
self._version,
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Chat.V1.ServiceList>'
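# Typical access path, as a hedged sketch (standard twilio-python usage; the
# credentials and friendly name are placeholders):
#   from twilio.rest import Client
#   client = Client(account_sid, auth_token)
#   service = client.chat.v1.services.create(friendly_name='my-chat-service')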
class ServicePage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ServicePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.ip_messaging.v1.service.ServicePage
:rtype: twilio.rest.ip_messaging.v1.service.ServicePage
"""
super(ServicePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ServiceInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.ip_messaging.v1.service.ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
return ServiceInstance(
self._version,
payload,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Chat.V1.ServicePage>'
class ServiceContext(InstanceContext):
def __init__(self, version, sid):
"""
Initialize the ServiceContext
:param Version version: Version that contains the resource
:param sid: The sid
:returns: twilio.rest.ip_messaging.v1.service.ServiceContext
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
super(ServiceContext, self).__init__(version)
# Path Solution
self._solution = {
'sid': sid,
}
self._uri = '/Services/{sid}'.format(**self._solution)
# Dependents
self._channels = None
self._roles = None
self._users = None
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ServiceInstance(
self._version,
payload,
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
webhooks_on_message_send_url=values.unset,
webhooks_on_message_send_method=values.unset,
webhooks_on_message_send_format=values.unset,
webhooks_on_message_update_url=values.unset,
webhooks_on_message_update_method=values.unset,
webhooks_on_message_update_format=values.unset,
webhooks_on_message_remove_url=values.unset,
webhooks_on_message_remove_method=values.unset,
webhooks_on_message_remove_format=values.unset,
webhooks_on_channel_add_url=values.unset,
webhooks_on_channel_add_method=values.unset,
webhooks_on_channel_add_format=values.unset,
webhooks_on_channel_destroy_url=values.unset,
webhooks_on_channel_destroy_method=values.unset,
webhooks_on_channel_destroy_format=values.unset,
webhooks_on_channel_update_url=values.unset,
webhooks_on_channel_update_method=values.unset,
webhooks_on_channel_update_format=values.unset,
webhooks_on_member_add_url=values.unset,
webhooks_on_member_add_method=values.unset,
webhooks_on_member_add_format=values.unset,
webhooks_on_member_remove_url=values.unset,
webhooks_on_member_remove_method=values.unset,
webhooks_on_member_remove_format=values.unset,
webhooks_on_message_sent_url=values.unset,
webhooks_on_message_sent_method=values.unset,
webhooks_on_message_sent_format=values.unset,
webhooks_on_message_updated_url=values.unset,
webhooks_on_message_updated_method=values.unset,
webhooks_on_message_updated_format=values.unset,
webhooks_on_message_removed_url=values.unset,
webhooks_on_message_removed_method=values.unset,
webhooks_on_message_removed_format=values.unset,
webhooks_on_channel_added_url=values.unset,
webhooks_on_channel_added_method=values.unset,
webhooks_on_channel_added_format=values.unset,
webhooks_on_channel_destroyed_url=values.unset,
webhooks_on_channel_destroyed_method=values.unset,
webhooks_on_channel_destroyed_format=values.unset,
webhooks_on_channel_updated_url=values.unset,
webhooks_on_channel_updated_method=values.unset,
webhooks_on_channel_updated_format=values.unset,
webhooks_on_member_added_url=values.unset,
webhooks_on_member_added_method=values.unset,
webhooks_on_member_added_format=values.unset,
webhooks_on_member_removed_url=values.unset,
webhooks_on_member_removed_method=values.unset,
webhooks_on_member_removed_format=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: The friendly_name
:param unicode default_service_role_sid: The default_service_role_sid
:param unicode default_channel_role_sid: The default_channel_role_sid
:param unicode default_channel_creator_role_sid: The default_channel_creator_role_sid
:param bool read_status_enabled: The read_status_enabled
:param bool reachability_enabled: The reachability_enabled
:param unicode typing_indicator_timeout: The typing_indicator_timeout
:param unicode consumption_report_interval: The consumption_report_interval
:param bool notifications_new_message_enabled: The notifications.new_message.enabled
:param unicode notifications_new_message_template: The notifications.new_message.template
:param bool notifications_added_to_channel_enabled: The notifications.added_to_channel.enabled
:param unicode notifications_added_to_channel_template: The notifications.added_to_channel.template
:param bool notifications_removed_from_channel_enabled: The notifications.removed_from_channel.enabled
:param unicode notifications_removed_from_channel_template: The notifications.removed_from_channel.template
:param bool notifications_invited_to_channel_enabled: The notifications.invited_to_channel.enabled
:param unicode notifications_invited_to_channel_template: The notifications.invited_to_channel.template
:param unicode pre_webhook_url: The pre_webhook_url
:param unicode post_webhook_url: The post_webhook_url
:param unicode webhook_method: The webhook_method
:param unicode webhook_filters: The webhook_filters
:param unicode webhooks_on_message_send_url: The webhooks.on_message_send.url
:param unicode webhooks_on_message_send_method: The webhooks.on_message_send.method
:param unicode webhooks_on_message_send_format: The webhooks.on_message_send.format
:param unicode webhooks_on_message_update_url: The webhooks.on_message_update.url
:param unicode webhooks_on_message_update_method: The webhooks.on_message_update.method
:param unicode webhooks_on_message_update_format: The webhooks.on_message_update.format
:param unicode webhooks_on_message_remove_url: The webhooks.on_message_remove.url
:param unicode webhooks_on_message_remove_method: The webhooks.on_message_remove.method
:param unicode webhooks_on_message_remove_format: The webhooks.on_message_remove.format
:param unicode webhooks_on_channel_add_url: The webhooks.on_channel_add.url
:param unicode webhooks_on_channel_add_method: The webhooks.on_channel_add.method
:param unicode webhooks_on_channel_add_format: The webhooks.on_channel_add.format
:param unicode webhooks_on_channel_destroy_url: The webhooks.on_channel_destroy.url
:param unicode webhooks_on_channel_destroy_method: The webhooks.on_channel_destroy.method
:param unicode webhooks_on_channel_destroy_format: The webhooks.on_channel_destroy.format
:param unicode webhooks_on_channel_update_url: The webhooks.on_channel_update.url
:param unicode webhooks_on_channel_update_method: The webhooks.on_channel_update.method
:param unicode webhooks_on_channel_update_format: The webhooks.on_channel_update.format
:param unicode webhooks_on_member_add_url: The webhooks.on_member_add.url
:param unicode webhooks_on_member_add_method: The webhooks.on_member_add.method
:param unicode webhooks_on_member_add_format: The webhooks.on_member_add.format
:param unicode webhooks_on_member_remove_url: The webhooks.on_member_remove.url
:param unicode webhooks_on_member_remove_method: The webhooks.on_member_remove.method
:param unicode webhooks_on_member_remove_format: The webhooks.on_member_remove.format
:param unicode webhooks_on_message_sent_url: The webhooks.on_message_sent.url
:param unicode webhooks_on_message_sent_method: The webhooks.on_message_sent.method
:param unicode webhooks_on_message_sent_format: The webhooks.on_message_sent.format
:param unicode webhooks_on_message_updated_url: The webhooks.on_message_updated.url
:param unicode webhooks_on_message_updated_method: The webhooks.on_message_updated.method
:param unicode webhooks_on_message_updated_format: The webhooks.on_message_updated.format
:param unicode webhooks_on_message_removed_url: The webhooks.on_message_removed.url
:param unicode webhooks_on_message_removed_method: The webhooks.on_message_removed.method
:param unicode webhooks_on_message_removed_format: The webhooks.on_message_removed.format
:param unicode webhooks_on_channel_added_url: The webhooks.on_channel_added.url
:param unicode webhooks_on_channel_added_method: The webhooks.on_channel_added.method
:param unicode webhooks_on_channel_added_format: The webhooks.on_channel_added.format
:param unicode webhooks_on_channel_destroyed_url: The webhooks.on_channel_destroyed.url
:param unicode webhooks_on_channel_destroyed_method: The webhooks.on_channel_destroyed.method
:param unicode webhooks_on_channel_destroyed_format: The webhooks.on_channel_destroyed.format
:param unicode webhooks_on_channel_updated_url: The webhooks.on_channel_updated.url
:param unicode webhooks_on_channel_updated_method: The webhooks.on_channel_updated.method
:param unicode webhooks_on_channel_updated_format: The webhooks.on_channel_updated.format
:param unicode webhooks_on_member_added_url: The webhooks.on_member_added.url
:param unicode webhooks_on_member_added_method: The webhooks.on_member_added.method
:param unicode webhooks_on_member_added_format: The webhooks.on_member_added.format
:param unicode webhooks_on_member_removed_url: The webhooks.on_member_removed.url
:param unicode webhooks_on_member_removed_method: The webhooks.on_member_removed.method
:param unicode webhooks_on_member_removed_format: The webhooks.on_member_removed.format
:param unicode limits_channel_members: The limits.channel_members
:param unicode limits_user_channels: The limits.user_channels
:returns: Updated ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
data = values.of({
'FriendlyName': friendly_name,
'DefaultServiceRoleSid': default_service_role_sid,
'DefaultChannelRoleSid': default_channel_role_sid,
'DefaultChannelCreatorRoleSid': default_channel_creator_role_sid,
'ReadStatusEnabled': read_status_enabled,
'ReachabilityEnabled': reachability_enabled,
'TypingIndicatorTimeout': typing_indicator_timeout,
'ConsumptionReportInterval': consumption_report_interval,
'Notifications.NewMessage.Enabled': notifications_new_message_enabled,
'Notifications.NewMessage.Template': notifications_new_message_template,
'Notifications.AddedToChannel.Enabled': notifications_added_to_channel_enabled,
'Notifications.AddedToChannel.Template': notifications_added_to_channel_template,
'Notifications.RemovedFromChannel.Enabled': notifications_removed_from_channel_enabled,
'Notifications.RemovedFromChannel.Template': notifications_removed_from_channel_template,
'Notifications.InvitedToChannel.Enabled': notifications_invited_to_channel_enabled,
'Notifications.InvitedToChannel.Template': notifications_invited_to_channel_template,
'PreWebhookUrl': pre_webhook_url,
'PostWebhookUrl': post_webhook_url,
'WebhookMethod': webhook_method,
'WebhookFilters': webhook_filters,
'Webhooks.OnMessageSend.Url': webhooks_on_message_send_url,
'Webhooks.OnMessageSend.Method': webhooks_on_message_send_method,
'Webhooks.OnMessageSend.Format': webhooks_on_message_send_format,
'Webhooks.OnMessageUpdate.Url': webhooks_on_message_update_url,
'Webhooks.OnMessageUpdate.Method': webhooks_on_message_update_method,
'Webhooks.OnMessageUpdate.Format': webhooks_on_message_update_format,
'Webhooks.OnMessageRemove.Url': webhooks_on_message_remove_url,
'Webhooks.OnMessageRemove.Method': webhooks_on_message_remove_method,
'Webhooks.OnMessageRemove.Format': webhooks_on_message_remove_format,
'Webhooks.OnChannelAdd.Url': webhooks_on_channel_add_url,
'Webhooks.OnChannelAdd.Method': webhooks_on_channel_add_method,
'Webhooks.OnChannelAdd.Format': webhooks_on_channel_add_format,
'Webhooks.OnChannelDestroy.Url': webhooks_on_channel_destroy_url,
'Webhooks.OnChannelDestroy.Method': webhooks_on_channel_destroy_method,
'Webhooks.OnChannelDestroy.Format': webhooks_on_channel_destroy_format,
'Webhooks.OnChannelUpdate.Url': webhooks_on_channel_update_url,
'Webhooks.OnChannelUpdate.Method': webhooks_on_channel_update_method,
'Webhooks.OnChannelUpdate.Format': webhooks_on_channel_update_format,
'Webhooks.OnMemberAdd.Url': webhooks_on_member_add_url,
'Webhooks.OnMemberAdd.Method': webhooks_on_member_add_method,
'Webhooks.OnMemberAdd.Format': webhooks_on_member_add_format,
'Webhooks.OnMemberRemove.Url': webhooks_on_member_remove_url,
'Webhooks.OnMemberRemove.Method': webhooks_on_member_remove_method,
'Webhooks.OnMemberRemove.Format': webhooks_on_member_remove_format,
'Webhooks.OnMessageSent.Url': webhooks_on_message_sent_url,
'Webhooks.OnMessageSent.Method': webhooks_on_message_sent_method,
'Webhooks.OnMessageSent.Format': webhooks_on_message_sent_format,
'Webhooks.OnMessageUpdated.Url': webhooks_on_message_updated_url,
'Webhooks.OnMessageUpdated.Method': webhooks_on_message_updated_method,
'Webhooks.OnMessageUpdated.Format': webhooks_on_message_updated_format,
'Webhooks.OnMessageRemoved.Url': webhooks_on_message_removed_url,
'Webhooks.OnMessageRemoved.Method': webhooks_on_message_removed_method,
'Webhooks.OnMessageRemoved.Format': webhooks_on_message_removed_format,
'Webhooks.OnChannelAdded.Url': webhooks_on_channel_added_url,
'Webhooks.OnChannelAdded.Method': webhooks_on_channel_added_method,
'Webhooks.OnChannelAdded.Format': webhooks_on_channel_added_format,
'Webhooks.OnChannelDestroyed.Url': webhooks_on_channel_destroyed_url,
'Webhooks.OnChannelDestroyed.Method': webhooks_on_channel_destroyed_method,
'Webhooks.OnChannelDestroyed.Format': webhooks_on_channel_destroyed_format,
'Webhooks.OnChannelUpdated.Url': webhooks_on_channel_updated_url,
'Webhooks.OnChannelUpdated.Method': webhooks_on_channel_updated_method,
'Webhooks.OnChannelUpdated.Format': webhooks_on_channel_updated_format,
'Webhooks.OnMemberAdded.Url': webhooks_on_member_added_url,
'Webhooks.OnMemberAdded.Method': webhooks_on_member_added_method,
'Webhooks.OnMemberAdded.Format': webhooks_on_member_added_format,
'Webhooks.OnMemberRemoved.Url': webhooks_on_member_removed_url,
'Webhooks.OnMemberRemoved.Method': webhooks_on_member_removed_method,
'Webhooks.OnMemberRemoved.Format': webhooks_on_member_removed_format,
'Limits.ChannelMembers': limits_channel_members,
'Limits.UserChannels': limits_user_channels,
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ServiceInstance(
self._version,
payload,
sid=self._solution['sid'],
)
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.ip_messaging.v1.service.channel.ChannelList
:rtype: twilio.rest.ip_messaging.v1.service.channel.ChannelList
"""
if self._channels is None:
self._channels = ChannelList(
self._version,
service_sid=self._solution['sid'],
)
return self._channels
@property
def roles(self):
"""
Access the roles
:returns: twilio.rest.ip_messaging.v1.service.role.RoleList
:rtype: twilio.rest.ip_messaging.v1.service.role.RoleList
"""
if self._roles is None:
self._roles = RoleList(
self._version,
service_sid=self._solution['sid'],
)
return self._roles
@property
def users(self):
"""
Access the users
:returns: twilio.rest.ip_messaging.v1.service.user.UserList
:rtype: twilio.rest.ip_messaging.v1.service.user.UserList
"""
if self._users is None:
self._users = UserList(
self._version,
service_sid=self._solution['sid'],
)
return self._users
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Chat.V1.ServiceContext {}>'.format(context)
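# Sub-resource access sketch (illustrative only, not part of the original file;
# `context` is assumed to be a ServiceContext obtained from the client):
#
#   context.channels.list()  # channels of this service
#   context.roles.list()     # roles of this service
#   context.users.list()     # users of this service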
class ServiceInstance(InstanceResource):
def __init__(self, version, payload, sid=None):
"""
Initialize the ServiceInstance
:returns: twilio.rest.ip_messaging.v1.service.ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
super(ServiceInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload['sid'],
'account_sid': payload['account_sid'],
'friendly_name': payload['friendly_name'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'default_service_role_sid': payload['default_service_role_sid'],
'default_channel_role_sid': payload['default_channel_role_sid'],
'default_channel_creator_role_sid': payload['default_channel_creator_role_sid'],
'read_status_enabled': payload['read_status_enabled'],
'reachability_enabled': payload['reachability_enabled'],
'typing_indicator_timeout': deserialize.integer(payload['typing_indicator_timeout']),
'consumption_report_interval': deserialize.integer(payload['consumption_report_interval']),
'limits': payload['limits'],
'webhooks': payload['webhooks'],
'pre_webhook_url': payload['pre_webhook_url'],
'post_webhook_url': payload['post_webhook_url'],
'webhook_method': payload['webhook_method'],
'webhook_filters': payload['webhook_filters'],
'notifications': payload['notifications'],
'url': payload['url'],
'links': payload['links'],
}
# Context
self._context = None
self._solution = {
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ServiceContext for this ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceContext
"""
if self._context is None:
self._context = ServiceContext(
self._version,
sid=self._solution['sid'],
)
return self._context
@property
def sid(self):
"""
:returns: The sid
:rtype: unicode
"""
return self._properties['sid']
@property
def account_sid(self):
"""
:returns: The account_sid
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def date_created(self):
"""
:returns: The date_created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date_updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def default_service_role_sid(self):
"""
:returns: The default_service_role_sid
:rtype: unicode
"""
return self._properties['default_service_role_sid']
@property
def default_channel_role_sid(self):
"""
:returns: The default_channel_role_sid
:rtype: unicode
"""
return self._properties['default_channel_role_sid']
@property
def default_channel_creator_role_sid(self):
"""
:returns: The default_channel_creator_role_sid
:rtype: unicode
"""
return self._properties['default_channel_creator_role_sid']
@property
def read_status_enabled(self):
"""
:returns: The read_status_enabled
:rtype: bool
"""
return self._properties['read_status_enabled']
@property
def reachability_enabled(self):
"""
:returns: The reachability_enabled
:rtype: bool
"""
return self._properties['reachability_enabled']
@property
def typing_indicator_timeout(self):
"""
:returns: The typing_indicator_timeout
:rtype: unicode
"""
return self._properties['typing_indicator_timeout']
@property
def consumption_report_interval(self):
"""
:returns: The consumption_report_interval
:rtype: unicode
"""
return self._properties['consumption_report_interval']
@property
def limits(self):
"""
:returns: The limits
:rtype: dict
"""
return self._properties['limits']
@property
def webhooks(self):
"""
:returns: The webhooks
:rtype: dict
"""
return self._properties['webhooks']
@property
def pre_webhook_url(self):
"""
:returns: The pre_webhook_url
:rtype: unicode
"""
return self._properties['pre_webhook_url']
@property
def post_webhook_url(self):
"""
:returns: The post_webhook_url
:rtype: unicode
"""
return self._properties['post_webhook_url']
@property
def webhook_method(self):
"""
:returns: The webhook_method
:rtype: unicode
"""
return self._properties['webhook_method']
@property
def webhook_filters(self):
"""
:returns: The webhook_filters
:rtype: unicode
"""
return self._properties['webhook_filters']
@property
def notifications(self):
"""
:returns: The notifications
:rtype: dict
"""
return self._properties['notifications']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def links(self):
"""
:returns: The links
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
webhooks_on_message_send_url=values.unset,
webhooks_on_message_send_method=values.unset,
webhooks_on_message_send_format=values.unset,
webhooks_on_message_update_url=values.unset,
webhooks_on_message_update_method=values.unset,
webhooks_on_message_update_format=values.unset,
webhooks_on_message_remove_url=values.unset,
webhooks_on_message_remove_method=values.unset,
webhooks_on_message_remove_format=values.unset,
webhooks_on_channel_add_url=values.unset,
webhooks_on_channel_add_method=values.unset,
webhooks_on_channel_add_format=values.unset,
webhooks_on_channel_destroy_url=values.unset,
webhooks_on_channel_destroy_method=values.unset,
webhooks_on_channel_destroy_format=values.unset,
webhooks_on_channel_update_url=values.unset,
webhooks_on_channel_update_method=values.unset,
webhooks_on_channel_update_format=values.unset,
webhooks_on_member_add_url=values.unset,
webhooks_on_member_add_method=values.unset,
webhooks_on_member_add_format=values.unset,
webhooks_on_member_remove_url=values.unset,
webhooks_on_member_remove_method=values.unset,
webhooks_on_member_remove_format=values.unset,
webhooks_on_message_sent_url=values.unset,
webhooks_on_message_sent_method=values.unset,
webhooks_on_message_sent_format=values.unset,
webhooks_on_message_updated_url=values.unset,
webhooks_on_message_updated_method=values.unset,
webhooks_on_message_updated_format=values.unset,
webhooks_on_message_removed_url=values.unset,
webhooks_on_message_removed_method=values.unset,
webhooks_on_message_removed_format=values.unset,
webhooks_on_channel_added_url=values.unset,
webhooks_on_channel_added_method=values.unset,
webhooks_on_channel_added_format=values.unset,
webhooks_on_channel_destroyed_url=values.unset,
webhooks_on_channel_destroyed_method=values.unset,
webhooks_on_channel_destroyed_format=values.unset,
webhooks_on_channel_updated_url=values.unset,
webhooks_on_channel_updated_method=values.unset,
webhooks_on_channel_updated_format=values.unset,
webhooks_on_member_added_url=values.unset,
webhooks_on_member_added_method=values.unset,
webhooks_on_member_added_format=values.unset,
webhooks_on_member_removed_url=values.unset,
webhooks_on_member_removed_method=values.unset,
webhooks_on_member_removed_format=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: The friendly_name
:param unicode default_service_role_sid: The default_service_role_sid
:param unicode default_channel_role_sid: The default_channel_role_sid
:param unicode default_channel_creator_role_sid: The default_channel_creator_role_sid
:param bool read_status_enabled: The read_status_enabled
:param bool reachability_enabled: The reachability_enabled
:param unicode typing_indicator_timeout: The typing_indicator_timeout
:param unicode consumption_report_interval: The consumption_report_interval
:param bool notifications_new_message_enabled: The notifications.new_message.enabled
:param unicode notifications_new_message_template: The notifications.new_message.template
:param bool notifications_added_to_channel_enabled: The notifications.added_to_channel.enabled
:param unicode notifications_added_to_channel_template: The notifications.added_to_channel.template
:param bool notifications_removed_from_channel_enabled: The notifications.removed_from_channel.enabled
:param unicode notifications_removed_from_channel_template: The notifications.removed_from_channel.template
:param bool notifications_invited_to_channel_enabled: The notifications.invited_to_channel.enabled
:param unicode notifications_invited_to_channel_template: The notifications.invited_to_channel.template
:param unicode pre_webhook_url: The pre_webhook_url
:param unicode post_webhook_url: The post_webhook_url
:param unicode webhook_method: The webhook_method
:param unicode webhook_filters: The webhook_filters
:param unicode webhooks_on_message_send_url: The webhooks.on_message_send.url
:param unicode webhooks_on_message_send_method: The webhooks.on_message_send.method
:param unicode webhooks_on_message_send_format: The webhooks.on_message_send.format
:param unicode webhooks_on_message_update_url: The webhooks.on_message_update.url
:param unicode webhooks_on_message_update_method: The webhooks.on_message_update.method
:param unicode webhooks_on_message_update_format: The webhooks.on_message_update.format
:param unicode webhooks_on_message_remove_url: The webhooks.on_message_remove.url
:param unicode webhooks_on_message_remove_method: The webhooks.on_message_remove.method
:param unicode webhooks_on_message_remove_format: The webhooks.on_message_remove.format
:param unicode webhooks_on_channel_add_url: The webhooks.on_channel_add.url
:param unicode webhooks_on_channel_add_method: The webhooks.on_channel_add.method
:param unicode webhooks_on_channel_add_format: The webhooks.on_channel_add.format
:param unicode webhooks_on_channel_destroy_url: The webhooks.on_channel_destroy.url
:param unicode webhooks_on_channel_destroy_method: The webhooks.on_channel_destroy.method
:param unicode webhooks_on_channel_destroy_format: The webhooks.on_channel_destroy.format
:param unicode webhooks_on_channel_update_url: The webhooks.on_channel_update.url
:param unicode webhooks_on_channel_update_method: The webhooks.on_channel_update.method
:param unicode webhooks_on_channel_update_format: The webhooks.on_channel_update.format
:param unicode webhooks_on_member_add_url: The webhooks.on_member_add.url
:param unicode webhooks_on_member_add_method: The webhooks.on_member_add.method
:param unicode webhooks_on_member_add_format: The webhooks.on_member_add.format
:param unicode webhooks_on_member_remove_url: The webhooks.on_member_remove.url
:param unicode webhooks_on_member_remove_method: The webhooks.on_member_remove.method
:param unicode webhooks_on_member_remove_format: The webhooks.on_member_remove.format
:param unicode webhooks_on_message_sent_url: The webhooks.on_message_sent.url
:param unicode webhooks_on_message_sent_method: The webhooks.on_message_sent.method
:param unicode webhooks_on_message_sent_format: The webhooks.on_message_sent.format
:param unicode webhooks_on_message_updated_url: The webhooks.on_message_updated.url
:param unicode webhooks_on_message_updated_method: The webhooks.on_message_updated.method
:param unicode webhooks_on_message_updated_format: The webhooks.on_message_updated.format
:param unicode webhooks_on_message_removed_url: The webhooks.on_message_removed.url
:param unicode webhooks_on_message_removed_method: The webhooks.on_message_removed.method
:param unicode webhooks_on_message_removed_format: The webhooks.on_message_removed.format
:param unicode webhooks_on_channel_added_url: The webhooks.on_channel_added.url
:param unicode webhooks_on_channel_added_method: The webhooks.on_channel_added.method
:param unicode webhooks_on_channel_added_format: The webhooks.on_channel_added.format
:param unicode webhooks_on_channel_destroyed_url: The webhooks.on_channel_destroyed.url
:param unicode webhooks_on_channel_destroyed_method: The webhooks.on_channel_destroyed.method
:param unicode webhooks_on_channel_destroyed_format: The webhooks.on_channel_destroyed.format
:param unicode webhooks_on_channel_updated_url: The webhooks.on_channel_updated.url
:param unicode webhooks_on_channel_updated_method: The webhooks.on_channel_updated.method
:param unicode webhooks_on_channel_updated_format: The webhooks.on_channel_updated.format
:param unicode webhooks_on_member_added_url: The webhooks.on_member_added.url
:param unicode webhooks_on_member_added_method: The webhooks.on_member_added.method
:param unicode webhooks_on_member_added_format: The webhooks.on_member_added.format
:param unicode webhooks_on_member_removed_url: The webhooks.on_member_removed.url
:param unicode webhooks_on_member_removed_method: The webhooks.on_member_removed.method
:param unicode webhooks_on_member_removed_format: The webhooks.on_member_removed.format
:param unicode limits_channel_members: The limits.channel_members
:param unicode limits_user_channels: The limits.user_channels
:returns: Updated ServiceInstance
:rtype: twilio.rest.ip_messaging.v1.service.ServiceInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
default_service_role_sid=default_service_role_sid,
default_channel_role_sid=default_channel_role_sid,
default_channel_creator_role_sid=default_channel_creator_role_sid,
read_status_enabled=read_status_enabled,
reachability_enabled=reachability_enabled,
typing_indicator_timeout=typing_indicator_timeout,
consumption_report_interval=consumption_report_interval,
notifications_new_message_enabled=notifications_new_message_enabled,
notifications_new_message_template=notifications_new_message_template,
notifications_added_to_channel_enabled=notifications_added_to_channel_enabled,
notifications_added_to_channel_template=notifications_added_to_channel_template,
notifications_removed_from_channel_enabled=notifications_removed_from_channel_enabled,
notifications_removed_from_channel_template=notifications_removed_from_channel_template,
notifications_invited_to_channel_enabled=notifications_invited_to_channel_enabled,
notifications_invited_to_channel_template=notifications_invited_to_channel_template,
pre_webhook_url=pre_webhook_url,
post_webhook_url=post_webhook_url,
webhook_method=webhook_method,
webhook_filters=webhook_filters,
webhooks_on_message_send_url=webhooks_on_message_send_url,
webhooks_on_message_send_method=webhooks_on_message_send_method,
webhooks_on_message_send_format=webhooks_on_message_send_format,
webhooks_on_message_update_url=webhooks_on_message_update_url,
webhooks_on_message_update_method=webhooks_on_message_update_method,
webhooks_on_message_update_format=webhooks_on_message_update_format,
webhooks_on_message_remove_url=webhooks_on_message_remove_url,
webhooks_on_message_remove_method=webhooks_on_message_remove_method,
webhooks_on_message_remove_format=webhooks_on_message_remove_format,
webhooks_on_channel_add_url=webhooks_on_channel_add_url,
webhooks_on_channel_add_method=webhooks_on_channel_add_method,
webhooks_on_channel_add_format=webhooks_on_channel_add_format,
webhooks_on_channel_destroy_url=webhooks_on_channel_destroy_url,
webhooks_on_channel_destroy_method=webhooks_on_channel_destroy_method,
webhooks_on_channel_destroy_format=webhooks_on_channel_destroy_format,
webhooks_on_channel_update_url=webhooks_on_channel_update_url,
webhooks_on_channel_update_method=webhooks_on_channel_update_method,
webhooks_on_channel_update_format=webhooks_on_channel_update_format,
webhooks_on_member_add_url=webhooks_on_member_add_url,
webhooks_on_member_add_method=webhooks_on_member_add_method,
webhooks_on_member_add_format=webhooks_on_member_add_format,
webhooks_on_member_remove_url=webhooks_on_member_remove_url,
webhooks_on_member_remove_method=webhooks_on_member_remove_method,
webhooks_on_member_remove_format=webhooks_on_member_remove_format,
webhooks_on_message_sent_url=webhooks_on_message_sent_url,
webhooks_on_message_sent_method=webhooks_on_message_sent_method,
webhooks_on_message_sent_format=webhooks_on_message_sent_format,
webhooks_on_message_updated_url=webhooks_on_message_updated_url,
webhooks_on_message_updated_method=webhooks_on_message_updated_method,
webhooks_on_message_updated_format=webhooks_on_message_updated_format,
webhooks_on_message_removed_url=webhooks_on_message_removed_url,
webhooks_on_message_removed_method=webhooks_on_message_removed_method,
webhooks_on_message_removed_format=webhooks_on_message_removed_format,
webhooks_on_channel_added_url=webhooks_on_channel_added_url,
webhooks_on_channel_added_method=webhooks_on_channel_added_method,
webhooks_on_channel_added_format=webhooks_on_channel_added_format,
webhooks_on_channel_destroyed_url=webhooks_on_channel_destroyed_url,
webhooks_on_channel_destroyed_method=webhooks_on_channel_destroyed_method,
webhooks_on_channel_destroyed_format=webhooks_on_channel_destroyed_format,
webhooks_on_channel_updated_url=webhooks_on_channel_updated_url,
webhooks_on_channel_updated_method=webhooks_on_channel_updated_method,
webhooks_on_channel_updated_format=webhooks_on_channel_updated_format,
webhooks_on_member_added_url=webhooks_on_member_added_url,
webhooks_on_member_added_method=webhooks_on_member_added_method,
webhooks_on_member_added_format=webhooks_on_member_added_format,
webhooks_on_member_removed_url=webhooks_on_member_removed_url,
webhooks_on_member_removed_method=webhooks_on_member_removed_method,
webhooks_on_member_removed_format=webhooks_on_member_removed_format,
limits_channel_members=limits_channel_members,
limits_user_channels=limits_user_channels,
)
@property
def channels(self):
"""
Access the channels
:returns: twilio.rest.ip_messaging.v1.service.channel.ChannelList
:rtype: twilio.rest.ip_messaging.v1.service.channel.ChannelList
"""
return self._proxy.channels
@property
def roles(self):
"""
Access the roles
:returns: twilio.rest.ip_messaging.v1.service.role.RoleList
:rtype: twilio.rest.ip_messaging.v1.service.role.RoleList
"""
return self._proxy.roles
@property
def users(self):
"""
Access the users
:returns: twilio.rest.ip_messaging.v1.service.user.UserList
:rtype: twilio.rest.ip_messaging.v1.service.user.UserList
"""
return self._proxy.users
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Chat.V1.ServiceInstance {}>'.format(context)
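# Usage sketch (illustrative only, not part of the original file; the SIDs and
# auth token below are placeholders, not real credentials):
#
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
#   service = client.ip_messaging.v1.services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#       .update(friendly_name='Customer Chat', read_status_enabled=True)
#   service.friendly_name  # -> 'Customer Chat'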
| mit | -8,875,363,262,685,539,000 | 46.29943 | 115 | 0.655721 | false |
vardis/pano | src/pano/model/__init__.py | 1 | 1157 | '''
Copyright (c) 2008 Georgios Giannoudovardis, <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
__all__ = [ "Hotspot", "Node"] | mit | 7,207,051,549,773,082,000 | 46.291667 | 77 | 0.770959 | false |
berndca/xmodels | xmodels/constraints.py | 1 | 12866 | from __future__ import unicode_literals
from collections import namedtuple
import logging
from six import string_types
from .fields import RegexField, ValidationException, NCName, Name
logger = logging.getLogger(__name__)
KeyRef = namedtuple('KeyRef', 'key_name key_value ref_path')
class KeyStore(object):
"""
Base class for all key and unique stores. It contains two dictionaries:
* index: {key_name: list_of_target_paths}
    * keys: {'%s:%s' % (key_name, target_path): {key_value: key_path}}
"""
def __init__(self):
self.index = {}
self.keys = {}
def add_key(self, key_names, target_path):
if isinstance(key_names, list):
key_names_list = key_names
else:
key_names_list = [key_names]
for key_name in key_names_list:
key = '%s:%s' % (key_name, target_path)
if key in self.keys:
raise ValidationException('Key %s does already exist.' % key,
target_path)
if key_name not in self.index:
self.index[key_name] = [target_path]
else:
self.index[key_name].append(target_path)
self.keys[key] = {}
def in_keys(self, key_name, target_path):
return '%s:%s' % (key_name, target_path) in self.keys
def add_value(self, key_names, target_path, key_value, key_path):
if isinstance(key_names, string_types):
key_names_list = [key_names]
else:
key_names_list = key_names
for key_name in key_names_list:
key = '%s:%s' % (key_name, target_path)
if self.in_keys(key_name, target_path):
if key_value in self.keys[key]:
msg = 'Duplicate key value %s for %s at %s' % (key_value,
key_name,
key_path)
raise ValidationException(msg, key_value)
self.keys[key][key_value] = key_path
return True
msg = 'Could not find target path %s for key name(s) %s' % \
(target_path, ', '.join(key_names_list))
raise ValidationException(msg, key_value)
def match_ref(self, key_name, ref_key_value):
if key_name not in self.index:
raise ValidationException('No key for %s exists' % key_name,
key_name)
for key_path in self.index[key_name]:
key = '%s:%s' % (key_name, key_path)
for key_value, instance_path in self.keys[key].items():
if key_value == ref_key_value:
return instance_path
raise ValidationException('Could not match ref %s for %s' % (
ref_key_value, key_name), ref_key_value)
def key_value_count(self, key_name, target_path):
key = '%s:%s' % (key_name, target_path)
if key in self.keys:
return len(self.keys[key])
return 0
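# Usage sketch (illustrative only; the key name and paths below are invented
# for the example): a key is registered for a target path first, then values
# are recorded against it and duplicates raise a ValidationException.
#
#   store = KeyStore()
#   store.add_key('portName', 'component.ports')
#   store.add_value('portName', 'component.ports', 'clk', 'component.ports.0')
#   store.match_ref('portName', 'clk')                     # -> 'component.ports.0'
#   store.key_value_count('portName', 'component.ports')   # -> 1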
class IDStore(KeyStore):
"""
    IDs are a special case of key since they all share the same path '/'
    and the same key name 'ID'.
"""
key_name = 'ID'
path = '/'
def __init__(self):
super(IDStore, self).__init__()
super(IDStore, self).add_key(self.key_name, self.path)
def add_id(self, key_value, key_path):
super(IDStore, self).add_value(self.key_name, self.path,
key_value, key_path)
def match_id(self, ref_key_value):
return super(IDStore, self).match_ref(self.key_name, ref_key_value)
def id_count(self):
return super(IDStore, self).key_value_count(self.key_name, self.path)
class RefStore(object):
"""
Store for keyref identity constraints.
* refs: list of namedtuple KeyRef(key_name, key_value, ref_path)
* targets: dict {ref_path: target_path}
"""
def __init__(self):
self.refs = []
self.targets = {}
def add_key_ref(self, key_name, key_value, ref_path):
if not key_value:
raise ValidationException('key value is required', key_value)
self.refs.append(KeyRef(key_name, key_value, ref_path))
def set_target(self, ref_path, target_path):
if ref_path in self.targets:
raise ValidationException('Target for ref_path already exists.',
ref_path)
self.targets[ref_path] = target_path
class IDREFStore(RefStore):
"""
    Store for IDREF. All IDREFs refer to the same key: 'ID'.
"""
def add_idref(self, key_value, ref_path):
super(IDREFStore, self).add_key_ref('ID', key_value, ref_path)
class Stores(object):
"""
Combination of all identity constraint related stores in a single object.
"""
def __init__(self):
self.keyStore = KeyStore()
self.uniquesStore = KeyStore()
self.idStore = IDStore()
self.refStore = RefStore()
self.idrefStore = IDREFStore()
def get_value_path_stores(**kwargs):
messages = dict(
path='No path supplied.',
store='Parameter store of type Stores expected.',
)
stores = kwargs.get('stores')
path = kwargs.get('path')
if stores is not None:
if not isinstance(stores, Stores):
raise TypeError(messages['store'])
return path, stores
class InitStores(object):
"""
    Initializes stores.keyStore if key_names is set, or stores.uniquesStore
    if unique_names is set, by adding the key name(s) for the given path.
"""
key_names = None
unique_names = None
messages = dict(
name='key names (string or list of strings) is required and can not '
'be empty.',
store='Parameter store of type Stores expected.',
)
def add_keys(self, path='', stores=None):
if self.key_names:
stores.keyStore.add_key(self.key_names, path)
if self.unique_names:
stores.uniquesStore.add_key(self.unique_names, path)
def check_key_name(self, key_name):
if not key_name or not isinstance(key_name, string_types):
raise ValueError(self.messages['name'])
class InitKeyStore(InitStores):
"""
Creates an empty dict under
stores.keyStore[keyName:keyTargetInstancePath]
"""
messages = dict(
name='keyName (string) is required and can not be empty.',
store='Parameter store of type Stores expected.',
)
def __init__(self, key_name):
self.check_key_name(key_name)
self.key_names = [key_name]
class InitUniqueStore(InitStores):
"""
Creates an empty dict under
stores.uniquesStore[keyName:keyTargetInstancePath]
"""
def __init__(self, key_name):
self.check_key_name(key_name)
self.unique_names = [key_name]
class SetupKeyRefsStore(object):
"""
"""
string_validator_instance = None
refer_key_name = None
messages = dict(
        names='keyNames (type list of strings or string) is required.',
emptyValue='Value may not be empty.',
)
def __init__(self, refer_key_name, **kwargs):
self.string_validator_instance = kwargs.get(
'string_validator_instance', self.string_validator_instance)
self.refer_key_name = refer_key_name
def validate(self, key_value, **kwargs):
path, stores = get_value_path_stores(**kwargs)
if self.string_validator_instance:
string_value = self.string_validator_instance.validate(key_value)
else:
string_value = key_value
if stores:
stores.refStore.add_key_ref(self.refer_key_name,
string_value, path)
return string_value
class CheckKeys(object):
"""
    Determines the targetPath by removing <level> trailing components from the
    element's path. For each keyName it looks up
    store[keyName:keyTargetInstancePath] and checks whether keyValue
    (element.value) is already present (duplicate error). If not, it adds
    element.value as key and element.path as value.
"""
not_empty = True
string_validator_instance = None
key_names = None
refer_key_name = None
level = None
messages = dict(
        names='keyNames (type list of strings or string) is required.',
        stores='stores (type dict) is required.',
missing='%(param)s is required for CheckKeys.',
type='%(param)s should be of type %(type)s.',
duplicate='%(value)s is a duplicate entry for key %(key)s.',
noMatch='Could not find match for path %(path)s.',
stateMissing='Parameter state is required.',
emptyValue='Value may not be empty.',
)
def __init__(self, **kwargs):
self.key_names = kwargs.get('key_names', self.key_names)
self.level = kwargs.get('level', self.level)
assert self.key_names, self.messages['names']
if isinstance(self.key_names, list):
assert self.key_names
for name in self.key_names:
assert isinstance(name, string_types)
else:
assert isinstance(self.key_names, string_types)
self.key_names = [self.key_names]
assert isinstance(self.level, int)
def validate(self, key_value, **kwargs):
path, stores = get_value_path_stores(**kwargs)
if not key_value:
if not self.not_empty:
return key_value
            # not_empty is set, so an empty value is an error
raise ValidationException(self.messages['emptyValue'], key_value)
if self.string_validator_instance:
string_value = self.string_validator_instance.validate(key_value)
else:
string_value = key_value
if stores is None:
return string_value
target_path = '.'.join(path.split('.')[:-self.level])
if self.refer_key_name:
stores.refStore.add_key_ref(self.refer_key_name, key_value, path)
self.add_value(stores, target_path, string_value, path)
return string_value
def add_value(self, stores, target_path, value, path):
if self.key_names:
stores.keyStore.add_value(self.key_names, target_path, value, path)
class CheckUniques(CheckKeys):
not_empty = False
key_names = None
def add_value(self, stores, target_path, value, path):
if self.key_names:
stores.uniquesStore.add_value(self.key_names, target_path,
value, path)
class KeyName(CheckKeys):
    """
    A KeyName is of type Name, may not be empty and is recorded in
    stores.keyStore.
    """
not_empty = True
store_name = 'keyStore'
string_validator_instance = Name()
class UniqueName(CheckUniques):
"""
A UniqueName is of type Name and may be empty.
"""
not_empty = False
string_validator_instance = Name()
class ID(NCName):
"""
The type ID is used for an attribute that uniquely identifies an element
in an XML document. An ID value must conform to the rules for an NCName.
This means that it must start with a letter or underscore, and can only
contain letters, digits, underscores, hyphens, and periods. ID values
must be unique within an XML instance, regardless of the attribute's name
or its element name.
"""
not_empty = True
def validate(self, key_value, **kwargs):
path, stores = get_value_path_stores(**kwargs)
string_value = super(ID, self).validate(key_value, **kwargs)
if stores:
stores.idStore.add_id(string_value, path)
return key_value
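# Illustrative NCName examples (not taken from the original source): values
# such as 'cpu_0', '_ref1' or 'my-id.v2' pass the ID validator, while '0cpu'
# (leading digit) or 'ns:id' (contains a colon) are rejected. Each accepted
# value is also registered in stores.idStore, so duplicate IDs raise an error.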
class IDREF(NCName):
"""
    The type IDREF is used for an attribute that references an ID declared
    elsewhere in the XML document. An IDREF value must conform to the rules
    for an NCName: it must start with a letter or underscore, and can only
    contain letters, digits, underscores, hyphens, and periods. Every IDREF
    value must match the value of some ID in the same instance; the collected
    references are resolved against the recorded IDs by match_refs().
"""
default_build_value = 'testId0'
not_empty = True
def validate(self, key_value, **kwargs):
path, stores = get_value_path_stores(**kwargs)
string_value = super(IDREF, self).validate(key_value, **kwargs)
if stores:
stores.idrefStore.add_idref(string_value, path)
return key_value
def match_refs(stores):
def match_store_refs(key_store, ref_store):
for ref in ref_store.refs:
instance_path = key_store.match_ref(ref.key_name, ref.key_value)
ref_store.set_target(ref.ref_path, instance_path)
logger.debug('Successfully matched "%s/%s", got: %r'
% (ref.key_name, ref.key_value, instance_path))
match_store_refs(stores.keyStore, stores.refStore)
match_store_refs(stores.idStore, stores.idrefStore)
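# End-to-end sketch (illustrative only; the key name and paths are invented
# for the example, and Name() is assumed to accept 'clk'): the key store is
# initialised and filled during validation, keyrefs are collected, and
# match_refs() resolves every collected reference against the recorded keys.
#
#   stores = Stores()
#   InitKeyStore('portName').add_keys(path='component.ports', stores=stores)
#   KeyName(key_names='portName', level=1).validate(
#       'clk', path='component.ports.0', stores=stores)
#   SetupKeyRefsStore('portName').validate(
#       'clk', path='component.portMaps.0', stores=stores)
#   match_refs(stores)
#   stores.refStore.targets  # -> {'component.portMaps.0': 'component.ports.0'}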
| bsd-3-clause | 3,555,487,826,444,224,000 | 33.218085 | 79 | 0.596844 | false |
agripo/website | core/models/shop.py | 1 | 12773 | from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models, IntegrityError
from django.db.models import Q, Sum
from django.utils import timezone
from django.db.models.signals import pre_save, post_save
from django.contrib.auth.models import User
from core.exceptions import CantSetCartQuantityOnUnsavedProduct, AddedMoreToCartThanAvailable
from core.models.users import AgripoUser
class ProductCategory(models.Model):
on_change_delete_cache = True
name = models.CharField(verbose_name='Nom de la catégorie', max_length=60, blank=False, null=False, unique=True)
def clean(self):
if self.name == '':
raise ValidationError('Empty category name')
def __str__(self):
return "{} : {}".format(self.id, self.name)
class Meta:
verbose_name = "Catégorie de produits"
verbose_name_plural = "Catégories de produits"
class Product(models.Model):
on_change_delete_cache = True
name = models.CharField(
max_length=60, blank=False, null=False, unique=True, verbose_name="Nom",
help_text="Nom affiché dans les fiches produits")
scientific_name = models.CharField(
default="", max_length=60, blank=True, null=False, verbose_name="Nom scientifique",
help_text="Nom affiché entre parenthèses dans les fiches produits")
category = models.ForeignKey(
ProductCategory, blank=False, null=False, verbose_name="Catégorie",
help_text="Catégorie sous laquelle apparaît ce produit.")
price = models.PositiveIntegerField(verbose_name="Prix unitaire", default=0, blank=False, null=False)
QUANTITY_TYPE_KILO = "k"
QUANTITY_TYPE_UNIT = "U"
QUANTITY_TYPE_LITER = "L"
PROGRAMMED_STATUS = (
(QUANTITY_TYPE_KILO, 'le kg'),
(QUANTITY_TYPE_LITER, 'le litre'),
(QUANTITY_TYPE_UNIT, 'l\'unité'),
)
quantity_type = models.CharField(
verbose_name="Unité", max_length=1, choices=PROGRAMMED_STATUS, default=QUANTITY_TYPE_KILO)
image = models.ImageField(
upload_to='products', blank=True, null=True, default="default/not_found.jpg", verbose_name="Image",
help_text="Cette image représente le produit.<br />"
"Elle doit faire 150x150px. "
"Si la largeur est différente de la hauteur, l'image apparaitra déformée."
)
description = models.TextField(verbose_name="Description du produit", default="", blank=True, null=False)
farmers = models.ManyToManyField(AgripoUser, verbose_name='Agriculteurs', through="Stock")
stock = models.PositiveIntegerField(
verbose_name='Stock',
default=0,
help_text="Champ alimenté automatiquement en fonction des déclarations des agriculteurs.")
bought = models.PositiveIntegerField(
verbose_name='Acheté',
default=0,
help_text="Champ alimenté automatiquement en fonction des commandes passées")
def __str__(self):
return "{} : {}".format(self.id, self.name)
def clean(self):
if self.name == '':
raise ValidationError('Empty product name')
if self.price <= 0:
raise ValidationError('Price should be bigger than zero')
def image_tag(self):
return u'<img src="{}" style="width:150px;height:140px;"/>'.format(settings.MEDIA_URL + str(self.image))
image_tag.short_description = 'Miniature'
image_tag.allow_tags = True
def update_stock(self):
        # Product.stock = Sum(farmers' declared stocks); bought quantities are
        # tracked separately in self.bought (see available_stock)
farmers_stock = Stock.objects.filter(product_id=self.id).aggregate(Sum('stock'))
stock = farmers_stock['stock__sum']
self.stock = stock
self.save()
def set_cart_quantity(self, user, quantity):
if not self.id:
raise CantSetCartQuantityOnUnsavedProduct
if quantity > self.available_stock():
raise AddedMoreToCartThanAvailable
if quantity == 0:
CartProduct.objects.filter(user=user, product_id=self.pk).delete()
else:
CartProduct.objects.update_or_create(user=user, product=self, defaults={'quantity': quantity})
return self
def get_cart_quantity(self, request):
cart_product = CartProduct.objects.filter(user=request.user, product=self)
if cart_product:
return cart_product[0].quantity
return 0
def buy(self, quantity):
if self.available_stock() < quantity:
raise AddedMoreToCartThanAvailable()
self.bought += quantity
self.save()
return self
def available_stock(self):
return self.stock - self.bought
def is_available(self):
return self.available_stock() > 0
is_available.__name__ = "Disponible"
is_available.boolean = True
@staticmethod
def static_get_cart_products(user):
cart_products = CartProduct.objects.filter(user=user)
ret = []
for cart_product in cart_products:
ret.append(dict(
id=cart_product.product_id, quantity=cart_product.quantity))
return ret
@staticmethod
def static_clear_cart(user):
CartProduct.objects.filter(user=user).delete()
class Meta:
verbose_name = "Produit"
verbose_name_plural = "Produits"
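# Stock bookkeeping sketch (illustrative values only; `tomato` and `farmer`
# are assumed to be an existing saved Product and a farmer-group AgripoUser):
# farmers' declarations feed Product.stock through Stock.save()/update_stock(),
# while purchases only increase Product.bought, so available_stock() always
# equals stock - bought.
#
#   Stock.objects.create(product=tomato, farmer=farmer, stock=50)  # tomato.stock == 50
#   tomato.buy(20)                                                 # tomato.bought == 20
#   tomato.available_stock()                                       # -> 30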
class CartProduct(models.Model):
user = models.ForeignKey(User)
product = models.ForeignKey(Product)
quantity = models.IntegerField()
class Meta:
unique_together = ("user", "product")
class Stock(models.Model):
on_change_delete_cache = True
product = models.ForeignKey(Product, verbose_name='Produit', related_name="one_farmers_stock")
farmer = models.ForeignKey(AgripoUser, verbose_name='Agriculteur', limit_choices_to=Q(groups__name='farmers'))
stock = models.PositiveIntegerField(default=0, verbose_name="Stock")
class Meta:
unique_together = ("product", "farmer", )
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def save(self, **kwargs):
if not self.farmer.is_farmer():
raise IntegrityError("Only farmers have stocks")
ret = super().save(**kwargs)
self.product.update_stock()
return ret
def set(self, stock):
"""
Updating the stock for this product in this farmer's account and on the product's general data
:param stock: The new stock for this product and for this farmer
:return: the Stock object
"""
self.stock = stock
self.save()
self.product.update_stock()
return self
class DeliveryPoint(models.Model):
name = models.CharField(verbose_name='Nom', max_length=64, unique=True)
description = models.TextField(verbose_name='Description', max_length=512)
def __str__(self):
return self.name
class Meta:
verbose_name = "Lieu de livraison"
verbose_name_plural = "Lieux de livraison"
class DeliveryQueryset(models.query.QuerySet):
def available(self):
return self.filter(done=False, date__gte=timezone.now()).order_by("date")
def done(self):
return self.filter(Q(done=True) | Q(date__lt=timezone.now()))
class DeliveryManager(models.Manager):
def get_queryset(self):
return DeliveryQueryset(self.model, using=self._db)
def available(self):
return self.get_queryset().available()
def done(self):
return self.get_queryset().done()
class Delivery(models.Model):
on_change_delete_cache = True
date = models.DateTimeField(verbose_name='Date de la livraison', default=timezone.now)
delivery_point = models.ForeignKey(DeliveryPoint, verbose_name="Lieu de livraison")
done = models.BooleanField(default=False, verbose_name="Livraison effectuée")
objects = DeliveryManager()
def __str__(self):
return "{} à {}".format(self.date.strftime("Le %d/%m à %Hh%M"), self.delivery_point.name)
def details_link(self):
count = self.commands.count()
if not count:
return "", 0
return reverse("delivery_details", kwargs=dict(id=self.pk)), count
def details(self):
total = {}
total_price = 0
commands = self.commands.all()
for command in commands:
total_price += command.total
commandproducts = command.commandproduct_set.all()
for commandproduct in commandproducts:
if commandproduct.product.pk not in total:
total[commandproduct.product.pk] = dict(quantity=0, product=commandproduct, total=0)
total[commandproduct.product.pk]['quantity'] += commandproduct.quantity
return {
'total': total,
'total_price': total_price,
'commands': commands
}
def write_done(self, done=True):
self.done = done
self.save()
return self
class Meta:
verbose_name = "Livraison"
verbose_name_plural = "Livraisons"
previous_delivery_done = False
def delivery_pre_saved(sender, **kwargs):
global previous_delivery_done
instance = kwargs.get('instance')
if isinstance(instance, Delivery):
try:
previous_delivery_done = Delivery.objects.get(pk=instance.pk).done
except instance.DoesNotExist:
            # Gives a false result, but this should only happen during tests
            # (the delivery instance only exists in memory and was never saved)
previous_delivery_done = instance.done
def delivery_saved(sender, **kwargs):
global previous_delivery_done
instance = kwargs.get('instance')
if isinstance(instance, Delivery):
if instance.done != previous_delivery_done:
# Listing the total quantities bought for all the commands in this delivery
bought_stocks = {}
for command in instance.commands.all():
for cp in command.commandproduct_set.all():
if cp.product.pk not in bought_stocks:
bought_stocks[cp.product.pk] = 0
bought_stocks[cp.product.pk] += cp.quantity
for product_id, stock in bought_stocks.items():
product = Product.objects.get(pk=product_id)
# We update the stocks for the commanded products
if instance.done:
product.bought -= stock
else:
product.bought += stock
product.update_stock()
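# Example effect (illustrative): if the commands of a delivery hold 5 units of
# a product in total, flipping the delivery to done lowers product.bought by 5
# and recomputes the stock, while flipping it back to not done adds those
# 5 units to product.bought again.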
pre_save.connect(delivery_pre_saved)
post_save.connect(delivery_saved)
class Command(models.Model):
"""
A command is the listing of the products for one customer in one delivery
"""
customer = models.ForeignKey(AgripoUser, verbose_name='Client', null=True)
delivery = models.ForeignKey(
Delivery, verbose_name="Lieu de livraison", related_name="commands",
help_text="Sélectionnez le lieu de livraison")
date = models.DateTimeField(verbose_name='Date', auto_now_add=True)
products = models.ManyToManyField(Product, verbose_name='Produits', through="CommandProduct")
sent = models.BooleanField(verbose_name='Envoyée ?', default=False)
message = models.TextField(
max_length=256, null=True, default="", verbose_name="Message",
help_text="Informations supplémentaires en rapport avec votre commande")
total = models.PositiveIntegerField(verbose_name='Total', default=0)
def __str__(self):
return "{} : {}".format(self.date.strftime("Le %d/%m à %Hh%M"), self.customer)
def validate(self):
# We get the products from the cart
products = Product.static_get_cart_products(self.customer)
for product in products:
the_product = Product.objects.get(id=product['id'])
cp = CommandProduct(command=self, product=the_product, quantity=product['quantity'])
cp.save()
the_product.buy(product['quantity'])
self.total += product['quantity'] * the_product.price
Product.static_clear_cart(self.customer)
self.save()
def is_sent(self):
return self.sent
def send(self):
self.sent = True
self.save()
return self
class CommandProduct(models.Model):
command = models.ForeignKey(Command)
product = models.ForeignKey(Product)
quantity = models.PositiveSmallIntegerField()
def __str__(self):
return "{} / {}".format(self.command, self.product)
def clean(self):
if self.quantity <= 0:
raise ValidationError('Quantity must be bigger than 0')
return super().clean()
class Meta:
unique_together = ('command', 'product', )
| gpl-2.0 | -3,663,379,892,135,324,000 | 33.448649 | 116 | 0.642162 | false |
zhuyue1314/simuvex | simuvex/s_slicer.py | 1 | 3706 |
import pyvex
from .s_errors import SimSlicerError
class SimSlicer(object):
"""
A super lightweight single-IRSB slicing class.
"""
def __init__(self, statements, target_tmps=None, target_regs=None, inslice_callback=None, inslice_callback_infodict=None):
self._statements = statements
self._target_tmps = target_tmps if target_tmps else set()
self._target_regs = target_regs if target_regs else set()
self._inslice_callback = inslice_callback
# It could be accessed publicly
self.inslice_callback_infodict = inslice_callback_infodict
self.stmts = [ ]
self.stmt_indices = [ ]
self.final_regs = set()
if not self._target_tmps and not self._target_regs:
raise SimSlicerError('Target temps and/or registers must be specified.')
self._slice()
def _slice(self):
"""
Slice it!
"""
regs = set(self._target_regs)
tmps = set(self._target_tmps)
for stmt_idx, stmt in reversed(list(enumerate(self._statements))):
if self._backward_handler_stmt(stmt, tmps, regs):
self.stmts.insert(0, stmt)
self.stmt_indices.insert(0, stmt_idx)
if self._inslice_callback:
self._inslice_callback(stmt_idx, stmt, self.inslice_callback_infodict)
if not regs and not tmps:
break
self.final_regs = regs
#
# Backward slice IRStmt handlers
#
def _backward_handler_stmt(self, stmt, temps, regs):
funcname = "_backward_handler_stmt_%s" % type(stmt).__name__
in_slice = False
if hasattr(self, funcname):
in_slice = getattr(self, funcname)(stmt, temps, regs)
return in_slice
def _backward_handler_stmt_WrTmp(self, stmt, temps, regs):
tmp = stmt.tmp
if tmp not in temps:
return False
temps.remove(tmp)
self._backward_handler_expr(stmt.data, temps, regs)
return True
def _backward_handler_stmt_Put(self, stmt, temps, regs):
reg = stmt.offset
if reg in regs:
regs.remove(reg)
self._backward_handler_expr(stmt.data, temps, regs)
return True
else:
return False
#
# Backward slice IRExpr handlers
#
def _backward_handler_expr(self, expr, temps, regs):
funcname = "_backward_handler_expr_%s" % type(expr).__name__
in_slice = False
if hasattr(self, funcname):
in_slice = getattr(self, funcname)(expr, temps, regs)
return in_slice
def _backward_handler_expr_RdTmp(self, expr, temps, regs):
tmp = expr.tmp
temps.add(tmp)
def _backward_handler_expr_Get(self, expr, temps, regs):
reg = expr.offset
regs.add(reg)
def _backward_handler_expr_Load(self, expr, temps, regs):
addr = expr.addr
if type(addr) is pyvex.IRExpr.RdTmp:
# FIXME: Process other types
self._backward_handler_expr(addr, temps, regs)
def _backward_handler_expr_Unop(self, expr, temps, regs):
arg = expr.args[0]
if type(arg) is pyvex.IRExpr.RdTmp:
self._backward_handler_expr(arg, temps, regs)
def _backward_handler_expr_CCall(self, expr, temps, regs):
for arg in expr.args:
if type(arg) is pyvex.IRExpr.RdTmp:
self._backward_handler_expr(arg, temps, regs)
def _backward_handler_expr_Binop(self, expr, temps, regs):
for arg in expr.args:
if type(arg) is pyvex.IRExpr.RdTmp:
self._backward_handler_expr(arg, temps, regs)
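# Usage sketch (illustrative only; assumes an IRSB has already been lifted
# with pyvex elsewhere, e.g. by the surrounding analysis):
#
#   slicer = SimSlicer(irsb.statements, target_tmps={3}, target_regs={16})
#   slicer.stmts         # statements the targets depend on, in original order
#   slicer.stmt_indices  # their indices within irsb.statements
#   slicer.final_regs    # register offsets still live at the top of the slice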
| bsd-2-clause | 7,626,896,577,661,538,000 | 26.864662 | 126 | 0.588505 | false |
ConflictGK/Codecatch-RSSE | properties.py | 1 | 1114 | import os
class Properties:
def __init__(self, query, example_query_index = -1, thepath = None):
self.query = query
main_dir = os.getcwd()
self.SCRAPY_EXEC = "C:/WinPython36/python-3.6.3.amd64/Scripts/scrapy.exe"
self.PARENT_DIR = main_dir + os.path.sep
if example_query_index >= 0:
self.DATA_DIR = self.PARENT_DIR + "experiments" + os.path.sep + "query" + str(example_query_index) + os.path.sep
else:
if thepath and thepath != "None":
self.DATA_DIR = self.PARENT_DIR + "data" + os.path.sep + thepath + os.path.sep
else:
self.DATA_DIR = self.PARENT_DIR + "data" + os.path.sep
self.SRESULTS_A = self.DATA_DIR[:-1].split(os.path.sep)[-2] + os.path.sep + self.DATA_DIR[:-1].split(os.path.sep)[-1] + os.path.sep + 'resultsA.json'
self.RESULTS_A = self.DATA_DIR + 'resultsA.json'
self.RESULTS_B = self.DATA_DIR + 'resultsB.json'
self.RESULTS_C = self.DATA_DIR + 'resultsC.json'
self.RESULTS_D = self.DATA_DIR + 'resultsD.json'
self.RESULTS_S = self.DATA_DIR + 'resultsS.json'
self.QUERY_DATA_FILE = self.RESULTS_A
self.RESULTS_FILE = self.RESULTS_D
| mit | 5,942,278,755,511,353,000 | 46.434783 | 151 | 0.654399 | false |
npinto/pytest | _pytest/skipping.py | 1 | 9336 | """ support for skip/xfail functions and markers. """
import py, pytest
import sys
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--runxfail',
action="store_true", dest="runxfail", default=False,
help="run tests even if they are marked xfail")
def pytest_configure(config):
config.addinivalue_line("markers",
"skipif(*conditions): skip the given test function if evaluation "
"of all conditions has a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. "
)
    config.addinivalue_line("markers",
        "xfail(*conditions, reason=None, run=True): mark the test function "
"as an expected failure. Optionally specify a reason and run=False "
"if you don't even want to execute the test function. Any positional "
"condition strings will be evaluated (like with skipif) and if one is "
"False the marker will not be applied."
)
def pytest_namespace():
return dict(xfail=xfail)
class XFailed(pytest.fail.Exception):
""" raised from an explicit call to py.test.xfail() """
def xfail(reason=""):
""" xfail an executing test or setup functions with the given reason."""
__tracebackhide__ = True
raise XFailed(reason)
xfail.Exception = XFailed
class MarkEvaluator:
def __init__(self, item, name):
self.item = item
self.name = name
@property
def holder(self):
return self.item.keywords.get(self.name, None)
def __bool__(self):
return bool(self.holder)
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, 'exc')
def istrue(self):
try:
return self._istrue()
except KeyboardInterrupt:
raise
except:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^",]
msg.append("SyntaxError: invalid syntax")
else:
msg = py.std.traceback.format_exception_only(*self.exc[:2])
pytest.fail("Error evaluating %r expression\n"
" %s\n"
"%s"
%(self.name, self.expr, "\n".join(msg)),
pytrace=False)
def _getglobals(self):
d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
func = self.item.obj
try:
d.update(func.__globals__)
except AttributeError:
d.update(func.func_globals)
return d
def _istrue(self):
if self.holder:
d = self._getglobals()
if self.holder.args:
self.result = False
for expr in self.holder.args:
self.expr = expr
if isinstance(expr, str):
result = cached_eval(self.item.config, expr, d)
else:
pytest.fail("expression is not a string")
if result:
self.result = True
self.expr = expr
break
else:
self.result = True
return getattr(self, 'result', False)
def get(self, attr, default=None):
return self.holder.kwargs.get(attr, default)
def getexplanation(self):
expl = self.get('reason', None)
if not expl:
if not hasattr(self, 'expr'):
return ""
else:
return "condition: " + str(self.expr)
return expl
def pytest_runtest_setup(item):
if not isinstance(item, pytest.Function):
return
evalskip = MarkEvaluator(item, 'skipif')
if evalskip.istrue():
py.test.skip(evalskip.getexplanation())
item._evalxfail = MarkEvaluator(item, 'xfail')
check_xfail_no_run(item)
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
def check_xfail_no_run(item):
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get('run', True):
py.test.xfail("[NOTRUN] " + evalxfail.getexplanation())
def pytest_runtest_makereport(__multicall__, item, call):
if not isinstance(item, pytest.Function):
return
    # unittest special case, see setting of _unexpectedsuccess
if hasattr(item, '_unexpectedsuccess'):
rep = __multicall__.execute()
if rep.when == "call":
# we need to translate into how py.test encodes xpass
rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
rep.outcome = "failed"
return rep
if not (call.excinfo and
call.excinfo.errisinstance(py.test.xfail.Exception)):
evalxfail = getattr(item, '_evalxfail', None)
if not evalxfail:
return
if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
if not item.config.getvalue("runxfail"):
rep = __multicall__.execute()
rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
return rep
rep = __multicall__.execute()
evalxfail = item._evalxfail
if not item.config.option.runxfail:
if evalxfail.wasvalid() and evalxfail.istrue():
if call.excinfo:
rep.outcome = "skipped"
rep.keywords['xfail'] = evalxfail.getexplanation()
elif call.when == "call":
rep.outcome = "failed"
rep.keywords['xfail'] = evalxfail.getexplanation()
return rep
if 'xfail' in rep.keywords:
del rep.keywords['xfail']
return rep
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if 'xfail' in report.keywords:
if report.skipped:
return "xfailed", "x", "xfail"
elif report.failed:
return "xpassed", "X", "XPASS"
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
#for name in "xfailed skipped failed xpassed":
# if not tr.stats.get(name, 0):
# tr.write_line("HINT: use '-r' option to see extra "
# "summary info about tests")
# break
return
lines = []
for char in tr.reportchars:
if char == "x":
show_xfailed(terminalreporter, lines)
elif char == "X":
show_xpassed(terminalreporter, lines)
elif char in "fF":
show_simple(terminalreporter, lines, 'failed', "FAIL %s")
elif char in "sS":
show_skipped(terminalreporter, lines)
elif char == "E":
show_simple(terminalreporter, lines, 'error', "ERROR %s")
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat, format):
tw = terminalreporter._tw
failed = terminalreporter.stats.get(stat)
if failed:
for rep in failed:
pos = rep.nodeid
lines.append(format %(pos, ))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
for rep in xfailed:
pos = rep.nodeid
reason = rep.keywords['xfail']
lines.append("XFAIL %s" % (pos,))
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
for rep in xpassed:
pos = rep.nodeid
reason = rep.keywords['xfail']
lines.append("XPASS %s %s" %(pos, reason))
def cached_eval(config, expr, d):
if not hasattr(config, '_evalcache'):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
#import sys
#print >>sys.stderr, ("cache-miss: %r" % expr)
exprcode = py.code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
def folded_skips(skipped):
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
d.setdefault(key, []).append(event)
l = []
for key, events in d.items():
l.append((len(events),) + key)
return l
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get('skipped', [])
if skipped:
#if not tr.hasopt('skipped'):
# tr.write_line(
# "%d skipped tests, specify -rs for more info" %
# len(skipped))
# return
fskips = folded_skips(skipped)
if fskips:
#tr.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
lines.append("SKIP [%d] %s:%d: %s" %
(num, fspath, lineno, reason))
| mit | -6,791,166,302,753,156,000 | 33.450185 | 80 | 0.564053 | false |
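A hypothetical test module exercising the hooks defined above; the string conditions are the form evaluated by MarkEvaluator/cached_eval, and pytest.xfail is the imperative helper registered via pytest_namespace():

import sys
import pytest

@pytest.mark.skipif('sys.platform == "win32"')
def test_posix_only():
    # Skipped on Windows by pytest_runtest_setup() above.
    assert True

@pytest.mark.xfail('sys.version_info < (3, 0)', reason="needs Python 3", run=True)
def test_python3_feature():
    assert sys.version_info[0] >= 3

def test_imperative_xfail():
    if not hasattr(sys, "pypy_version_info"):
        pytest.xfail("relies on PyPy internals")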
zh012/flask-dropin | docs/conf.py | 1 | 9664 | # -*- coding: utf-8 -*-
#
# Flask-DropIn documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 13 12:26:05 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask-DropIn'
copyright = u'2015, Jerry Zhang'
author = u'Jerry Zhang'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-DropIndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-DropIn.tex', u'Flask-DropIn Documentation',
u'Jerry Zhang', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-dropin', u'Flask-DropIn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-DropIn', u'Flask-DropIn Documentation',
author, 'Flask-DropIn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'flask_small'
html_theme_options = {
'index_logo': 'flask-dropin.png',
'github_fork': 'zh012/flask-dropin'
}
| mit | 2,382,465,205,383,267,300 | 31.106312 | 79 | 0.706436 | false |
clembou/PCWG | pcwg/gui/grid_box.py | 1 | 7539 |
import Tkinter as tk
import tkFont as tkFont
import ttk as ttk
from ..exceptions.handling import ExceptionHandler
class GridBox(object):
def __init__(self, master, headers, row, column):
self.master = master
self.headers = headers
self.items_dict = {}
self.tree = None
self.container = ttk.Frame(self.master)
self.container.grid(row=row, column=column, sticky=tk.W+tk.E+tk.N+tk.S)
self._set_up_tree_widget()
self._build_tree()
# create a popup menu
self.pop_menu = tk.Menu(self.tree, tearoff=0)
self.pop_menu.add_command(label="New", command=self.new)
self.pop_menu.add_command(label="Remove", command=self.remove)
self.pop_menu.add_command(label="Remove All", command=self.remove_all)
self.pop_menu.add_command(label="Edit", command=self.edit)
self.pop_menu_add = tk.Menu(self.tree, tearoff=0)
self.pop_menu_add.add_command(label="New", command=self.new)
self.pop_menu_add.add_command(label="Remove All", command=self.remove_all)
self.tree.bind("<Button-2>", self.pop_up)
self.tree.bind("<Button-3>", self.pop_up)
self.tip = None
def clearTip(self):
self.setTip("")
def setTipNotRequired(self):
self.setTip("Not Required")
def setTip(self, text):
if self.tip != None:
self.tip['text'] = text
def item_count(self):
return len(self.items_dict)
def pop_up(self, event):
item = self.tree.identify_row(event.y)
if item:
# mouse pointer over item
self.tree.selection_set(item)
self.tree.update()
self.pop_menu.post(event.x_root, event.y_root)
else:
self.pop_menu_add.post(event.x_root, event.y_root)
def get_selected_key(self):
selection = self.tree.selection()
if len(selection) > 0:
return selection[0]
else:
return None
def get_selected(self):
key = self.get_selected_key()
if key != None:
return self.items_dict[key]
else:
return None
def new(self):
pass
def get_item_values(self, item):
return {}
def edit_item(self, item):
pass
def remove_all(self):
keys = self.items_dict.keys()
for key in keys:
self.remove_item(key)
def remove_item(self, key):
del self.items_dict[key]
self.tree.delete(key)
def remove(self):
selection = self.get_selected_key()
if selection != None:
self.remove_item(selection)
def edit(self):
try:
item = self.get_selected()
if item != None:
self.edit_item(item)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "Cannot edit item")
def add_item(self, item):
values = self.get_tree_values(item)
key = self.tree.insert('', 'end', values = values)
self.items_dict[key] = item
self.adjust_width(values)
def redraw_item(self, key):
item = self.items_dict[key]
values = self.get_tree_values(item)
self.tree.item(key, text='', values=values)
self.adjust_width(values)
def adjust_width(self, values):
# adjust column's width if necessary to fit each value
for ix, val in enumerate(values):
col_w = tkFont.Font().measure(val)
if self.tree.column(self.headers[ix],width=None)<col_w:
self.tree.column(self.headers[ix], width=col_w)
def get_tree_values(self, item):
values = []
values_dict = self.get_item_values(item)
for header in self.headers:
values.append(values_dict[header])
return values
def add_items(self, items):
for item in items:
self.add_item(item)
def get_items(self):
return self.items_dict.values()
def double_click(self, event):
key = self.tree.identify('item', event.x, event.y)
if key in self.items_dict:
item = self.items_dict[key]
self.edit_item(item)
def _set_up_tree_widget(self):
tree_container = ttk.Frame(self.container)
tree_container.grid(row=0, column=0, sticky=tk.W+tk.E+tk.N+tk.S)
#tree_container.pack(fill='both', expand=True)
# create a treeview with dual scrollbars
self.tree = ttk.Treeview(tree_container, columns=self.headers, show="headings")
vsb = ttk.Scrollbar(tree_container, orient="vertical", command=self.tree.yview)
hsb = ttk.Scrollbar(tree_container, orient="horizontal", command=self.tree.xview)
self.tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)
self.tree.grid(column=0, row=0, sticky='nsew')
vsb.grid(column=1, row=0, sticky='ns')
hsb.grid(column=0, row=1, sticky='ew')
tree_container.grid_columnconfigure(0, weight=1)
tree_container.grid_rowconfigure(0, weight=1)
self.tree.bind("<Double-1>", self.double_click)
def get_header_width(self, header):
return tkFont.Font().measure(header.title()) * self.get_header_scale()
def get_header_scale(self):
return 1
def _build_tree(self):
for col in self.headers:
self.tree.heading(col, text=col.title(),
command=lambda c=col: self.sortby(self.tree, c, 0))
# adjust the column's width to the header string
self.tree.column(col, width=self.get_header_width(col))
def sortby(self, tree, col, descending):
"""sort tree contents when a column header is clicked on"""
# grab values to sort
data = [(tree.set(child, col), child) \
for child in tree.get_children('')]
# if the data to be sorted is numeric change to float
#data = change_numeric(data)
# now sort the data in place
data.sort(reverse=descending)
for ix, item in enumerate(data):
tree.move(item[1], '', ix)
# switch the heading so it will sort in the opposite direction
tree.heading(col, command=lambda col=col: self.sortby(tree, col, \
int(not descending)))
class DialogGridBox(GridBox):
def __init__(self, master, parent_dialog, row, column):
self.parent_dialog = parent_dialog
headers = self.get_headers()
GridBox.__init__(self, master, headers, row, column)
def get_headers(self):
pass
def get_item_values(self, item):
pass
def new_dialog(self, master, parent_dialog, item):
pass
def new(self):
dialog = self.new_dialog(self.master, self.parent_dialog, None)
self.add_item(dialog.item)
def edit_item(self, item):
try:
key = self.get_selected_key()
item = self.items_dict[key]
self.new_dialog(self.master, self.parent_dialog, item)
self.redraw_item(key)
except ExceptionHandler.ExceptionType as e:
ExceptionHandler.add(e, "ERROR editing item")
def remove(self):
GridBox.remove(self)
| mit | 2,690,398,291,229,796,400 | 27.026022 | 89 | 0.569041 | false |
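An illustrative subclass wiring up the three hooks that DialogGridBox above leaves unimplemented; the item and dialog classes here are made-up stand-ins (a real dialog would open a Tkinter window and populate self.item from user input):

class DatasetItem(object):
    def __init__(self, name, path):
        self.name = name
        self.path = path

class DatasetDialog(object):
    # Stand-in for a real edit dialog; it only has to expose an `item` attribute.
    def __init__(self, master, parent_dialog, item):
        self.item = item if item is not None else DatasetItem("new", "")

class DatasetGridBox(DialogGridBox):
    def get_headers(self):
        return ["Name", "Path"]

    def get_item_values(self, item):
        return {"Name": item.name, "Path": item.path}

    def new_dialog(self, master, parent_dialog, item):
        return DatasetDialog(master, parent_dialog, item)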
digitalocean/netbox | netbox/extras/admin.py | 1 | 6231 | from django import forms
from django.contrib import admin
from utilities.forms import LaxURLField
from .models import CustomField, CustomLink, ExportTemplate, JobResult, Webhook
def order_content_types(field):
"""
Order the list of available ContentTypes by application
"""
queryset = field.queryset.order_by('app_label', 'model')
field.choices = [(ct.pk, '{} > {}'.format(ct.app_label, ct.name)) for ct in queryset]
#
# Webhooks
#
class WebhookForm(forms.ModelForm):
payload_url = LaxURLField(
label='URL'
)
class Meta:
model = Webhook
exclude = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if 'content_types' in self.fields:
order_content_types(self.fields['content_types'])
@admin.register(Webhook)
class WebhookAdmin(admin.ModelAdmin):
list_display = [
'name', 'models', 'payload_url', 'http_content_type', 'enabled', 'type_create', 'type_update', 'type_delete',
'ssl_verification',
]
list_filter = [
'enabled', 'type_create', 'type_update', 'type_delete', 'content_types',
]
form = WebhookForm
fieldsets = (
(None, {
'fields': ('name', 'content_types', 'enabled')
}),
('Events', {
'fields': ('type_create', 'type_update', 'type_delete')
}),
('HTTP Request', {
'fields': (
'payload_url', 'http_method', 'http_content_type', 'additional_headers', 'body_template', 'secret',
),
'classes': ('monospace',)
}),
('SSL', {
'fields': ('ssl_verification', 'ca_file_path')
})
)
def models(self, obj):
return ', '.join([ct.name for ct in obj.content_types.all()])
#
# Custom fields
#
class CustomFieldForm(forms.ModelForm):
class Meta:
model = CustomField
exclude = []
widgets = {
'default': forms.TextInput(),
'validation_regex': forms.Textarea(
attrs={
'cols': 80,
'rows': 3,
}
)
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
order_content_types(self.fields['content_types'])
@admin.register(CustomField)
class CustomFieldAdmin(admin.ModelAdmin):
actions = None
form = CustomFieldForm
list_display = [
'name', 'models', 'type', 'required', 'filter_logic', 'default', 'weight', 'description',
]
list_filter = [
'type', 'required', 'content_types',
]
fieldsets = (
('Custom Field', {
'fields': ('type', 'name', 'weight', 'label', 'description', 'required', 'default', 'filter_logic')
}),
('Assignment', {
'description': 'A custom field must be assigned to one or more object types.',
'fields': ('content_types',)
}),
('Validation Rules', {
'fields': ('validation_minimum', 'validation_maximum', 'validation_regex'),
'classes': ('monospace',)
}),
('Choices', {
'description': 'A selection field must have two or more choices assigned to it.',
'fields': ('choices',)
})
)
def models(self, obj):
return ', '.join([ct.name for ct in obj.content_types.all()])
#
# Custom links
#
class CustomLinkForm(forms.ModelForm):
class Meta:
model = CustomLink
exclude = []
widgets = {
'text': forms.Textarea,
'url': forms.Textarea,
}
help_texts = {
'weight': 'A numeric weight to influence the ordering of this link among its peers. Lower weights appear '
'first in a list.',
'text': 'Jinja2 template code for the link text. Reference the object as <code>{{ obj }}</code>. Links '
'which render as empty text will not be displayed.',
'url': 'Jinja2 template code for the link URL. Reference the object as <code>{{ obj }}</code>.',
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Format ContentType choices
order_content_types(self.fields['content_type'])
self.fields['content_type'].choices.insert(0, ('', '---------'))
@admin.register(CustomLink)
class CustomLinkAdmin(admin.ModelAdmin):
fieldsets = (
('Custom Link', {
'fields': ('content_type', 'name', 'group_name', 'weight', 'button_class', 'new_window')
}),
('Templates', {
'fields': ('text', 'url'),
'classes': ('monospace',)
})
)
list_display = [
'name', 'content_type', 'group_name', 'weight',
]
list_filter = [
'content_type',
]
form = CustomLinkForm
#
# Export templates
#
class ExportTemplateForm(forms.ModelForm):
class Meta:
model = ExportTemplate
exclude = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Format ContentType choices
order_content_types(self.fields['content_type'])
self.fields['content_type'].choices.insert(0, ('', '---------'))
@admin.register(ExportTemplate)
class ExportTemplateAdmin(admin.ModelAdmin):
fieldsets = (
('Export Template', {
'fields': ('content_type', 'name', 'description', 'mime_type', 'file_extension')
}),
('Content', {
'fields': ('template_code',),
'classes': ('monospace',)
})
)
list_display = [
'name', 'content_type', 'description', 'mime_type', 'file_extension',
]
list_filter = [
'content_type',
]
form = ExportTemplateForm
#
# Reports
#
@admin.register(JobResult)
class JobResultAdmin(admin.ModelAdmin):
list_display = [
'obj_type', 'name', 'created', 'completed', 'user', 'status',
]
fields = [
'obj_type', 'name', 'created', 'completed', 'user', 'status', 'data', 'job_id'
]
list_filter = [
'status',
]
readonly_fields = fields
def has_add_permission(self, request):
return False
| apache-2.0 | -5,184,452,310,745,300,000 | 26.091304 | 118 | 0.540363 | false |
QISKit/qiskit-sdk-py | qiskit/extensions/standard/s.py | 1 | 2513 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
S=diag(1,i) Clifford phase gate or its inverse.
"""
import numpy
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
from qiskit.qasm import pi
from qiskit.extensions.standard.u1 import U1Gate
class SGate(Gate):
"""S=diag(1,i) Clifford phase gate."""
def __init__(self, label=None):
"""Create new S gate."""
super().__init__("s", 1, [], label=label)
def _define(self):
"""
gate s a { u1(pi/2) a; }
"""
definition = []
q = QuantumRegister(1, "q")
rule = [
(U1Gate(pi/2), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate."""
return SdgGate()
def to_matrix(self):
"""Return a Numpy.array for the S gate."""
return numpy.array([[1, 0],
[0, 1j]], dtype=complex)
class SdgGate(Gate):
"""Sdg=diag(1,-i) Clifford adjoint phase gate."""
def __init__(self, label=None):
"""Create new Sdg gate."""
super().__init__("sdg", 1, [], label=label)
def _define(self):
"""
gate sdg a { u1(-pi/2) a; }
"""
definition = []
q = QuantumRegister(1, "q")
rule = [
(U1Gate(-pi/2), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate."""
return SGate()
def to_matrix(self):
"""Return a Numpy.array for the Sdg gate."""
return numpy.array([[1, 0],
[0, -1j]], dtype=complex)
def s(self, q):
"""Apply S to q."""
return self.append(SGate(), [q], [])
def sdg(self, q):
"""Apply Sdg to q."""
return self.append(SdgGate(), [q], [])
QuantumCircuit.s = s
QuantumCircuit.sdg = sdg
| apache-2.0 | -7,865,258,483,430,124,000 | 24.383838 | 77 | 0.564266 | false |
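Usage sketch: because the module patches QuantumCircuit with .s() and .sdg(), the gates can be appended directly once qiskit is imported:

from qiskit import QuantumCircuit

qc = QuantumCircuit(1)
qc.s(0)    # S = diag(1, i)
qc.sdg(0)  # Sdg = diag(1, -i); S followed by Sdg composes to the identity
print(qc)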
quantumlib/Cirq | cirq-core/cirq/sim/simulator_base.py | 1 | 12429 | # Copyright 2021 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batteries-included class for Cirq's built-in simulators."""
import abc
import collections
from typing import (
Any,
Dict,
Iterator,
List,
Tuple,
TYPE_CHECKING,
cast,
Generic,
Type,
Sequence,
Optional,
)
import numpy as np
from cirq import circuits, ops, protocols, study, value, devices
from cirq.sim import ActOnArgsContainer
from cirq.sim.operation_target import OperationTarget
from cirq.sim.simulator import (
TStepResult,
TSimulationTrialResult,
TSimulatorState,
TActOnArgs,
SimulatesIntermediateState,
SimulatesSamples,
check_all_resolved,
split_into_matching_protocol_then_general,
)
if TYPE_CHECKING:
import cirq
class SimulatorBase(
Generic[TStepResult, TSimulationTrialResult, TSimulatorState, TActOnArgs],
SimulatesIntermediateState[TStepResult, TSimulationTrialResult, TSimulatorState, TActOnArgs],
SimulatesSamples,
metaclass=abc.ABCMeta,
):
"""A base class for the built-in simulators.
Most implementors of this interface should implement the
`_create_partial_act_on_args` and `_create_step_result` methods. The first
one creates the simulator's quantum state representation at the beginning
of the simulation. The second creates the step result emitted after each
`Moment` in the simulation.
Iteration in the subclass is handled by the `_core_iterator` implementation
here, which handles moment stepping, application of operations, measurement
collection, and creation of noise. Simulators with more advanced needs can
override the implementation if necessary.
Sampling is handled by the implementation of `_run`. This implementation
iterates the circuit to create a final step result, and samples that
result when possible. If not possible, due to noise or classical
probabilities on a state vector, the implementation attempts to fully
iterate the unitary prefix once, then only repeat the non-unitary
suffix from copies of the state obtained by the prefix. If more advanced
functionality is required, then the `_run` method can be overridden.
Note that state here refers to simulator state, which is not necessarily
a state vector. The included simulators and corresponding states are state
vector, density matrix, Clifford, and MPS. Each of these use the default
`_core_iterator` and `_run` methods.
"""
def __init__(
self,
*,
dtype: Type[np.number] = np.complex64,
noise: 'cirq.NOISE_MODEL_LIKE' = None,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
ignore_measurement_results: bool = False,
split_untangled_states: bool = False,
):
"""Initializes the simulator.
Args:
dtype: The `numpy.dtype` used by the simulation.
noise: A noise model to apply while simulating.
seed: The random seed to use for this simulator.
ignore_measurement_results: If True, then the simulation
will treat measurement as dephasing instead of collapsing
process. This is only applicable to simulators that can
model dephasing.
split_untangled_states: If True, optimizes simulation by running
unentangled qubit sets independently and merging those states
at the end.
"""
self._dtype = dtype
self._prng = value.parse_random_state(seed)
self.noise = devices.NoiseModel.from_noise_model_like(noise)
self._ignore_measurement_results = ignore_measurement_results
self._split_untangled_states = split_untangled_states
@abc.abstractmethod
def _create_partial_act_on_args(
self,
initial_state: Any,
qubits: Sequence['cirq.Qid'],
logs: Dict[str, Any],
) -> TActOnArgs:
"""Creates an instance of the TActOnArgs class for the simulator.
It represents the supplied qubits initialized to the provided state.
Args:
initial_state: The initial state to represent. An integer state is
understood to be a pure state. Other state representations are
simulator-dependent.
qubits: The sequence of qubits to represent.
logs: The structure to hold measurement logs. A single instance
should be shared among all ActOnArgs within the simulation.
"""
@abc.abstractmethod
def _create_step_result(
self,
sim_state: TActOnArgs,
qubit_map: Dict['cirq.Qid', int],
) -> TStepResult:
"""This method should be implemented to create a step result.
Args:
sim_state: The TActOnArgs for this trial.
qubit_map: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
Returns:
The StepResult.
"""
def _can_be_in_run_prefix(self, val: Any):
"""Determines what should be put in the prefix in `_run`
The `_run` method has an optimization that reduces repetition by
splitting the circuit into a prefix that is pure with respect to the
state representation, and only executing that once per sample set. For
state vectors, any unitary operation is pure, and we make this the
default here. For density matrices, any non-measurement operation can
        be represented wholly in the matrix, and thus this method is
        overridden there to enable greater optimization.
Custom simulators can override this method appropriately.
Args:
val: An operation or noise model to test for purity within the
state representation.
Returns:
A boolean representing whether the value can be added to the
`_run` prefix."""
return protocols.has_unitary(val)
def _core_iterator(
self,
circuit: circuits.Circuit,
sim_state: OperationTarget[TActOnArgs],
all_measurements_are_terminal: bool = False,
) -> Iterator[TStepResult]:
"""Standard iterator over StepResult from Moments of a Circuit.
Args:
circuit: The circuit to simulate.
sim_state: The initial args for the simulation. The form of
this state depends on the simulation implementation. See
documentation of the implementing class for details.
Yields:
StepResults from simulating a Moment of the Circuit.
"""
if len(circuit) == 0:
step_state = sim_state.create_merged_state()
yield self._create_step_result(step_state, step_state.qubit_map)
return
noisy_moments = self.noise.noisy_moments(circuit, sorted(circuit.all_qubits()))
measured: Dict[Tuple['cirq.Qid', ...], bool] = collections.defaultdict(bool)
for moment in noisy_moments:
for op in ops.flatten_to_ops(moment):
try:
# TODO: support more general measurements.
# Github issue: https://github.com/quantumlib/Cirq/issues/3566
# Preprocess measurements
if all_measurements_are_terminal and measured[op.qubits]:
continue
if isinstance(op.gate, ops.MeasurementGate):
measured[op.qubits] = True
if all_measurements_are_terminal:
continue
if self._ignore_measurement_results:
op = ops.phase_damp(1).on(*op.qubits)
# Simulate the operation
sim_state.apply_operation(op)
except TypeError:
raise TypeError(f"{self.__class__.__name__} doesn't support {op!r}")
step_state = sim_state.create_merged_state()
yield self._create_step_result(step_state, step_state.qubit_map)
step_state.log_of_measurement_results.clear()
def _run(
self, circuit: circuits.Circuit, param_resolver: study.ParamResolver, repetitions: int
) -> Dict[str, np.ndarray]:
"""See definition in `cirq.SimulatesSamples`."""
if self._ignore_measurement_results:
raise ValueError("run() is not supported when ignore_measurement_results = True")
param_resolver = param_resolver or study.ParamResolver({})
resolved_circuit = protocols.resolve_parameters(circuit, param_resolver)
check_all_resolved(resolved_circuit)
qubits = tuple(sorted(resolved_circuit.all_qubits()))
act_on_args = self._create_act_on_args(0, qubits)
prefix, general_suffix = (
split_into_matching_protocol_then_general(resolved_circuit, self._can_be_in_run_prefix)
if self._can_be_in_run_prefix(self.noise)
else (resolved_circuit[0:0], resolved_circuit)
)
step_result = None
for step_result in self._core_iterator(
circuit=prefix,
sim_state=act_on_args,
):
pass
general_ops = list(general_suffix.all_operations())
if all(isinstance(op.gate, ops.MeasurementGate) for op in general_ops):
for step_result in self._core_iterator(
circuit=general_suffix,
sim_state=act_on_args,
all_measurements_are_terminal=True,
):
pass
assert step_result is not None
measurement_ops = [cast(ops.GateOperation, op) for op in general_ops]
return step_result.sample_measurement_ops(measurement_ops, repetitions, seed=self._prng)
measurements: Dict[str, List[np.ndarray]] = {}
for i in range(repetitions):
all_step_results = self._core_iterator(
general_suffix,
sim_state=act_on_args.copy() if i < repetitions - 1 else act_on_args,
)
for step_result in all_step_results:
for k, v in step_result.measurements.items():
if k not in measurements:
measurements[k] = []
measurements[k].append(np.array(v, dtype=np.uint8))
return {k: np.array(v) for k, v in measurements.items()}
def _create_act_on_args(
self,
initial_state: Any,
qubits: Sequence['cirq.Qid'],
) -> OperationTarget[TActOnArgs]:
if isinstance(initial_state, OperationTarget):
return initial_state
log: Dict[str, Any] = {}
if self._split_untangled_states:
args_map: Dict[Optional['cirq.Qid'], TActOnArgs] = {}
if isinstance(initial_state, int):
for q in reversed(qubits):
args_map[q] = self._create_partial_act_on_args(
initial_state=initial_state % q.dimension,
qubits=[q],
logs=log,
)
initial_state = int(initial_state / q.dimension)
else:
args = self._create_partial_act_on_args(
initial_state=initial_state,
qubits=qubits,
logs=log,
)
for q in qubits:
args_map[q] = args
args_map[None] = self._create_partial_act_on_args(0, (), log)
return ActOnArgsContainer(args_map, qubits, self._split_untangled_states, log)
else:
return self._create_partial_act_on_args(
initial_state=initial_state,
qubits=qubits,
logs=log,
)
| apache-2.0 | 7,946,348,196,334,659,000 | 39.223301 | 100 | 0.620323 | false |
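Usage sketch: cirq.Simulator (the state-vector simulator) is one concrete subclass built on this base class, so the sampling below goes through the _run() path shown above:

import cirq

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(
    cirq.H(q0),
    cirq.CNOT(q0, q1),
    cirq.measure(q0, q1, key='m'),
)
result = cirq.Simulator(seed=0).run(circuit, repetitions=100)
print(result.histogram(key='m'))  # roughly even counts of 0 (|00>) and 3 (|11>)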
lelandbatey/defuse_division | defusedivision/game.py | 1 | 9622 | import logging
import random
import curses
import queue
from .minesweeper.minefield import MineField
from .minesweeper.contents import Contents
class Conveyor(object):
"""
Abstract class Conveyor describes the basic contract for communicating about games of Minesweeper.
"""
def get_state(self):
raise NotImplementedError
def send_input(self, inpt):
raise NotImplementedError
class Keys:
UP = 'UP'
DOWN = 'DOWN'
LEFT = 'LEFT'
RIGHT = 'RIGHT'
PROBE = 'PROBE'
FLAG = 'FLAG'
DIRECTIONKEYS = [Keys.UP, Keys.DOWN, Keys.LEFT, Keys.RIGHT]
def _move_select(direction, field):
"""
Function _move_select changes the 'selected' field of a MineField depending
    on the direction provided. 'direction' must be one of the Keys direction
    constants (UP, DOWN, LEFT, or RIGHT). If moving the selected cell would move
    to an out of bounds position, we do nothing.
"""
startloc = field.selected
delta = [0, 0]
if direction == Keys.UP:
delta = [0, -1]
elif direction == Keys.DOWN:
delta = [0, 1]
elif direction == Keys.RIGHT:
delta = [1, 0]
elif direction == Keys.LEFT:
delta = [-1, 0]
# Filter out-of-bounds deltas
x, y = startloc
nx, ny = [x + delta[0], y + delta[1]]
if nx < 0 or nx >= field.width:
nx = x
if ny < 0 or ny >= field.height:
ny = y
field.selected = [nx, ny]
def create_foothold(field):
"""
Function create_foothold will remove mines from around the currently
selected cell, ensuring that the current cell cannot have a mine, and that
probing that cell will open up some amount of space.
"""
x, y = field.selected
cell = field.board[x][y]
moved_count = 0
safe_cells = [v for _, v in cell.neighbors.items() if v]
safe_cells += [cell]
for neighbor in safe_cells:
if neighbor.contents == Contents.mine:
neighbor.contents = Contents.empty
moved_count += 1
# Place a new mine for each of the mines we had to move out of the way
while moved_count > 0:
rx, ry = random.randint(0, field.width - 1), random.randint(
0, field.height - 1)
possible_mine = field.board[rx][ry]
# Ensure any new location won't be in the desired foothold
if not possible_mine in safe_cells:
# Only place mines where there aren't existing mines
if not possible_mine.contents == Contents.mine:
possible_mine.contents = Contents.mine
moved_count -= 1
def _first_probe(field):
"""
Function _first_probe checks if this is the first probe of any cell in this
minefield, returning True if it is the first probe, and False if it's not.
"""
cells = [c for row in field.board for c in row]
for cell in cells:
if cell.probed:
return False
return True
def _probe_selected(field):
"""
Function _probe_selected probes the currently selected cell. If the
    cell is flagged, the probe is ignored and True is returned immediately. If the
    probed cell contains a mine, return False; otherwise, return True.
"""
x, y = field.selected
cell = field.board[x][y]
if cell.flagged:
return True
# Create a foothold for the first probe
if _first_probe(field):
create_foothold(field)
cell.probe()
if cell.contents == Contents.mine:
return False
return True
def _flag_selected(field):
x, y = field.selected
cell = field.board[x][y]
cell.flagged = not cell.flagged
def check_win(mfield):
flags = 0
correct_flags = 0
for h in range(mfield.height):
for w in range(mfield.width):
c = mfield.board[w][h]
if c.contents == Contents.mine and c.flagged:
correct_flags += 1
if c.flagged:
flags += 1
if correct_flags == mfield.mine_count and flags == correct_flags:
return True
return False
class Player(Conveyor):
"""
Class Player contains the minefield that a particular player is playing
against, as well as passthrough-methods to send input to a parent Bout.
"""
def __init__(self, name, bout, mine_count=None, height=None, width=None):
# self._args = args
self.name = name
self.bout = bout
self.stateq = queue.Queue()
self.mfield = MineField(
height=height, width=width, mine_count=mine_count)
self.living = True
self.victory = False
def send_input(self, inpt):
# Just pass the input to the parent bout, but with info saying that
# this input comes from this player
self.bout.send_input({'player': self.name, 'input': inpt})
def get_state(self):
return self.stateq.get()
def json(self):
return {
'name': self.name,
'living': self.living,
'minefield': self.mfield.json(),
'victory': self.victory,
}
class Bout(object):
"""
Class Bout holds information on the state of the game (won/lost) as well as
all the players playing currently.
`player_constructor` is a callable which accepts the same arguments as
class `Player`, and returns a `Player`-like object. Allows a Bout to use a
Player which gets it's input from anywhere.
"""
def __init__(self,
max_players=2,
minefield_size=(12, 12),
mine_count=None,
player_constructor=None):
self.max_players = max_players
self.minefield_size = minefield_size
self.mine_count = mine_count
self.players = dict()
self.ready = False
if player_constructor is None:
player_constructor = Player
self.player_constructor = player_constructor
def send_input(self, inpt_event):
'''
Method send_input is the final stop for an inpt_event, as those events
are used here by the Bout to modify the state of the game.
'''
player = self.players[inpt_event['player']]
field = player.mfield
inpt = inpt_event['input']
if isinstance(inpt, dict):
# Change the name of a player
if 'change-name' in inpt:
newname = inpt['change-name']
while newname in self.players:
newname = newname + str(random.randint(0, 100))
oldname = player.name
logging.info('Changing player name from: "{}" to "{}"'.format(
oldname, newname))
player.name = newname
self.players[newname] = player
del self.players[oldname]
if 'new-minefield' in inpt:
info = inpt['new-minefield']
height = info['height']
width = info['width']
mine_count = info['mine_count']
new_mfield = MineField(
height=height, width=width, mine_count=mine_count)
player.mfield = new_mfield
if inpt in DIRECTIONKEYS:
_move_select(inpt, field)
self._push_selected(player.name, field.selected)
return
if inpt == Keys.PROBE:
if not _probe_selected(field):
player.living = False
if inpt == Keys.FLAG:
_flag_selected(field)
if check_win(field):
player.victory = True
self._push_state()
def _push_state(self):
'''
        Method _push_state puts the state of this bout into every Player's
stateq.
'''
for _, v in self.players.items():
v.stateq.put(('new-state', self.json()))
def _push_selected(self, playername, selected):
'''
Method _push_selected pushes a state to all Players updating one
players selected position.
'''
for _, v in self.players.items():
v.stateq.put(('update-selected', (playername, selected)))
def add_player(self):
'''
Method add_player creates a new player object for this Bout, and
returns a reference to that player. If there are already
self.max_players players set to play in this bout, then returns None.
'''
if self.max_players <= len(self.players):
return None
pname = "Player{}-{}".format(
len(self.players) + 1, random.randint(0, 10000))
width, height = self.minefield_size
player = self.player_constructor(
pname,
self,
mine_count=self.mine_count,
height=height,
width=width)
self.players[pname] = player
logging.info('Adding player: "{}" {}'.format(pname, player))
if len(self.players) >= self.max_players:
self.ready = True
self._push_state()
return player
def remove_player(self, playername):
'''
Method remove_player removes a player with the given name from this
Bout's collection of players. If no player exists with the given name,
does nothing.
'''
logging.info('Removing player: "{}"'.format(playername))
if playername in self.players:
del self.players[playername]
if len(self.players) < self.max_players:
self.ready = False
self._push_state()
def json(self):
jplayers = {k: v.json() for k, v in self.players.items()}
return {"players": jplayers, 'ready': self.ready}
| gpl-3.0 | -569,778,188,004,578,700 | 30.34202 | 102 | 0.585014 | false |
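A minimal headless driver for the classes above, using only the API defined in this module; the board size and mine count are arbitrary:

bout = Bout(max_players=1, minefield_size=(8, 8), mine_count=10)
player = bout.add_player()          # with one player the bout is immediately ready
player.send_input(Keys.RIGHT)       # move the selection cursor one cell right
player.send_input(Keys.PROBE)       # probe it; the first probe is made safe by create_foothold
kind, payload = player.get_state()  # first queued item is the ('new-state', ...) pushed by add_player
print(kind)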
beni55/rinohtype | rinohlib/templates/article.py | 1 | 1907 |
from rinoh.document import DocumentSection
from rinoh.paragraph import Paragraph
from rinoh.structure import GroupedFlowables
from .base import (ContentsPart, DocumentBase, DocumentOptions,
TableOfContentsSection)
class ArticleFrontMatter(GroupedFlowables):
def __init__(self):
self.toc_section = TableOfContentsSection()
super().__init__()
def prepare(self, document):
self.toc_section.prepare(document)
def flowables(self, document):
meta = document.metadata
yield Paragraph(meta['title'], style='title')
if 'subtitle' in meta:
yield Paragraph(meta['subtitle'], style='subtitle')
if 'date' in meta:
date = meta['date']
try:
yield Paragraph(date.strftime('%B %d, %Y'), style='author')
except AttributeError:
yield Paragraph(date, style='author')
if 'author' in meta:
yield Paragraph(meta['author'], style='author')
if document.options['table_of_contents']:
yield self.toc_section
# document parts
# ----------------------------------------------------------------------------
class ArticlePart(ContentsPart):
def __init__(self, document_section):
self.front_matter = ArticleFrontMatter()
super().__init__(document_section)
def prepare(self):
self.front_matter.prepare(self.document)
def flowables(self):
yield self.front_matter
for flowable in super().flowables():
yield flowable
class ArticleSection(DocumentSection):
parts = [ArticlePart]
# main document
# ----------------------------------------------------------------------------
class ArticleOptions(DocumentOptions):
options = {'table_of_contents': True}
class Article(DocumentBase):
sections = [ArticleSection]
options_class = ArticleOptions
| agpl-3.0 | -5,709,940,673,110,627,000 | 28.338462 | 78 | 0.588359 | false |